Revert "Remove a bunch of unused dependencies (godep v54+ required)"
This reverts commit 9ed133ea01
.
This commit is contained in:
parent
9ed133ea01
commit
8a53385cbc
|
@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/mitchellh/packer",
-	"GoVersion": "go1.6",
+	"GoVersion": "go1.5",
 	"Deps": [
 		{
 			"ImportPath": "github.com/ActiveState/tail",
@ -16,61 +16,6 @@
|
|||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
|
||||
"Comment": "v1.1.2",
|
||||
|
@ -81,36 +26,6 @@
|
|||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
|
||||
"Comment": "v1.1.2",
|
||||
|
@ -131,16 +46,6 @@
|
|||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"Comment": "v1.1.2",
|
||||
"Rev": "8041be5461786460d86b4358305fbdf32d37cfb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
|
||||
"Comment": "v1.1.2",
|
||||
|
@ -287,10 +192,6 @@
|
|||
"ImportPath": "github.com/mitchellh/go-fs",
|
||||
"Rev": "a34c1b9334e86165685a9449b782f20465eb8c69"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/go-fs/fat",
|
||||
"Rev": "a34c1b9334e86165685a9449b782f20465eb8c69"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/go-homedir",
|
||||
"Rev": "d682a8f0cf139663a984ff12528da460ca963de9"
|
||||
|
@ -348,86 +249,6 @@
|
|||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/common/extensions",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/openstack/utils",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/pagination",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/testhelper",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rackspace/gophercloud/testhelper/client",
|
||||
"Comment": "v1.0.0-810-g53d1dc4",
|
||||
"Rev": "53d1dc4400e1ebcd37a0e01d8c1fe2f4db3b99d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/satori/go.uuid",
|
||||
"Rev": "d41af8bb6a7704f00bc3b7cba9355ae6a5a80048"
|
||||
|
@ -448,46 +269,18 @@
|
|||
"ImportPath": "golang.org/x/crypto/ssh",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh/agent",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "6ccd6698c634f5d835c40c1c31848729e0cecda1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||
"Rev": "6ccd6698c634f5d835c40c1c31848729e0cecda1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/html",
|
||||
"Rev": "6ccd6698c634f5d835c40c1c31848729e0cecda1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/html/atom",
|
||||
"Rev": "6ccd6698c634f5d835c40c1c31848729e0cecda1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2",
|
||||
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/google",
|
||||
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/internal",
|
||||
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jws",
|
||||
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jwt",
|
||||
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "50c6bc5e4292a1d4e65c6e9be5f53be28bcbe28e"
|
||||
|
@ -504,42 +297,10 @@
|
|||
"ImportPath": "google.golang.org/api/googleapi",
|
||||
"Rev": "ddff2aff599105a55549cf173852507dfa094b7f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
|
||||
"Rev": "ddff2aff599105a55549cf173852507dfa094b7f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/app_identity",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/base",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/datastore",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/log",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/modules",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/remote_api",
|
||||
"Rev": "6bde959377a90acb53366051d7d587bfd7171354"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/compute/metadata",
|
||||
"Rev": "5a3b06f8b5da3b7c3a93da43163b872c86c509ef"
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
.test
|
||||
.go
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
language: go
|
||||
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
|
||||
go:
|
||||
- 1.3.3
|
||||
- 1.4.3
|
||||
- 1.5.2
|
||||
|
||||
install:
|
||||
- go get gopkg.in/fsnotify.v1
|
||||
- go get gopkg.in/tomb.v1
|
|
@ -0,0 +1,57 @@
|
|||
# API current (gopkg.in/ActiveState/tail)
|
||||
|
||||
## July, 2015
|
||||
|
||||
* Fix inotify watcher leak; remove `Cleanup` (#51)
|
||||
|
||||
# API v0 (gopkg.in/ActiveState/tail.v0)
|
||||
|
||||
## June, 2015
|
||||
|
||||
* Don't return partial lines (PR #40)
|
||||
* Use stable version of fsnotify (#46)
|
||||
|
||||
## July, 2014
|
||||
|
||||
* Fix tail for Windows (PR #36)
|
||||
|
||||
## May, 2014
|
||||
|
||||
* Improved rate limiting using leaky bucket (PR #29)
|
||||
* Fix odd line splitting (PR #30)
|
||||
|
||||
## Apr, 2014
|
||||
|
||||
* LimitRate now discards read buffer (PR #28)
|
||||
* allow reading of longer lines if MaxLineSize is unset (PR #24)
|
||||
* updated deps.json to latest fsnotify (441bbc86b1)
|
||||
|
||||
## Feb, 2014
|
||||
|
||||
* added `Config.Logger` to suppress library logging
|
||||
|
||||
## Nov, 2013
|
||||
|
||||
* add Cleanup to remove leaky inotify watches (PR #20)
|
||||
|
||||
## Aug, 2013
|
||||
|
||||
* redesigned Location field (PR #12)
|
||||
* add tail.Tell (PR #14)
|
||||
|
||||
## July, 2013
|
||||
|
||||
* Rate limiting (PR #10)
|
||||
|
||||
## May, 2013
|
||||
|
||||
* Detect file deletions/renames in polling file watcher (PR #1)
|
||||
* Detect file truncation
|
||||
* Fix potential race condition when reopening the file (issue 5)
|
||||
* Fix potential blocking of `tail.Stop` (issue 4)
|
||||
* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
|
||||
* Support Follow=false
|
||||
|
||||
## Feb, 2013
|
||||
|
||||
* Initial open source release
|
|
@ -0,0 +1,19 @@
|
|||
FROM golang
|
||||
|
||||
RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
|
||||
ADD . $GOPATH/src/github.com/hpcloud/tail/
|
||||
|
||||
# expecting to fetch dependencies successfully.
|
||||
RUN go get -v github.com/hpcloud/tail
|
||||
|
||||
# expecting to run the test successfully.
|
||||
RUN go test -v github.com/hpcloud/tail
|
||||
|
||||
# expecting to install successfully
|
||||
RUN go install -v github.com/hpcloud/tail
|
||||
RUN go install -v github.com/hpcloud/tail/cmd/gotail
|
||||
|
||||
RUN $GOPATH/bin/gotail -h || true
|
||||
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
CMD ["gotail"]
|
|
@ -0,0 +1,23 @@
|
|||
# This is the MIT license
|
||||
|
||||
# Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
default: test
|
||||
|
||||
test: *.go
|
||||
go test -v ./...
|
||||
|
||||
fmt:
|
||||
gofmt -w .
|
||||
|
||||
# Run the test in an isolated environment.
|
||||
fulltest:
|
||||
docker build -t hpcloud/tail .
|
|
@ -0,0 +1,27 @@
|
|||
[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
|
||||
|
||||
# Go package for tail-ing files
|
||||
|
||||
A Go package striving to emulate the features of the BSD `tail` program.
|
||||
|
||||
```Go
|
||||
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
|
||||
for line := range t.Lines {
|
||||
fmt.Println(line.Text)
|
||||
}
|
||||
```
|
||||
|
||||
See [API documentation](http://godoc.org/github.com/hpcloud/tail).
|
||||
|
||||
## Log rotation
|
||||
|
||||
Tail comes with full support for truncation/move detection as it is
|
||||
designed to work with log rotation tools.
|
||||
|
||||
## Installing
|
||||
|
||||
go get github.com/hpcloud/tail/...
|
||||
|
||||
## Windows support
|
||||
|
||||
This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
|
|
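The README above already shows the basic follow loop; the sketch below extends it to the log-rotation case it describes, using only the `Config` fields (`Follow`, `ReOpen`) and the `Cleanup`/`Wait` methods defined later in this commit. The log path is illustrative.

```go
package main

import (
	"fmt"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow across renames/recreation, i.e. the `tail -F` behaviour the
	// "Log rotation" section refers to.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		panic(err)
	}
	defer t.Cleanup() // drop inotify watches when the process exits

	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	if err := t.Wait(); err != nil {
		fmt.Println("tail stopped:", err)
	}
}
```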
@ -0,0 +1 @@
|
|||
gotail
|
|
@ -0,0 +1,4 @@
|
|||
default: gotail
|
||||
|
||||
gotail: *.go ../../*.go
|
||||
go build
|
|
@ -0,0 +1,64 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/hpcloud/tail"
|
||||
"os"
|
||||
)
|
||||
|
||||
func args2config() (tail.Config, int64) {
|
||||
config := tail.Config{Follow: true}
|
||||
n := int64(0)
|
||||
maxlinesize := int(0)
|
||||
flag.Int64Var(&n, "n", 0, "tail from the last Nth location")
|
||||
flag.IntVar(&maxlinesize, "max", 0, "max line size")
|
||||
flag.BoolVar(&config.Follow, "f", false, "wait for additional data to be appended to the file")
|
||||
flag.BoolVar(&config.ReOpen, "F", false, "follow, and track file rename/rotation")
|
||||
flag.BoolVar(&config.Poll, "p", false, "use polling, instead of inotify")
|
||||
flag.Parse()
|
||||
if config.ReOpen {
|
||||
config.Follow = true
|
||||
}
|
||||
config.MaxLineSize = maxlinesize
|
||||
return config, n
|
||||
}
|
||||
|
||||
func main() {
|
||||
config, n := args2config()
|
||||
if flag.NFlag() < 1 {
|
||||
fmt.Println("need one or more files as arguments")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if n != 0 {
|
||||
config.Location = &tail.SeekInfo{-n, os.SEEK_END}
|
||||
}
|
||||
|
||||
done := make(chan bool)
|
||||
for _, filename := range flag.Args() {
|
||||
go tailFile(filename, config, done)
|
||||
}
|
||||
|
||||
for _, _ = range flag.Args() {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
func tailFile(filename string, config tail.Config, done chan bool) {
|
||||
defer func() { done <- true }()
|
||||
t, err := tail.TailFile(filename, config)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
for line := range t.Lines {
|
||||
fmt.Println(line.Text)
|
||||
}
|
||||
err = t.Wait()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
{
|
||||
"github.com/howeyc/fsnotify": {
|
||||
"repo": "http://github.com/howeyc/fsnotify.git",
|
||||
"version": "441bbc86b167",
|
||||
"type": "git-clone",
|
||||
"alias": "github.com/howeyc/fsnotify"
|
||||
},
|
||||
"gopkg.in/tomb.v1": {
|
||||
"repo": "https://github.com/go-tomb/tomb.git",
|
||||
"version": "c131134a1947e9afd9cecfe11f4c6dff0732ae58",
|
||||
"type": "git-clone",
|
||||
"alias": "gopkg.in/tomb.v1"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
Copyright (C) 2013 99designs
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,97 @@
|
|||
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
|
||||
package ratelimiter
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type LeakyBucket struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
Now func() time.Time
|
||||
}
|
||||
|
||||
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: size,
|
||||
Fill: 0,
|
||||
LeakInterval: leakInterval,
|
||||
Now: time.Now,
|
||||
Lastupdate: time.Now(),
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) updateFill() {
|
||||
now := b.Now()
|
||||
if b.Fill > 0 {
|
||||
elapsed := now.Sub(b.Lastupdate)
|
||||
|
||||
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
|
||||
if b.Fill < 0 {
|
||||
b.Fill = 0
|
||||
}
|
||||
}
|
||||
b.Lastupdate = now
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Pour(amount uint16) bool {
|
||||
b.updateFill()
|
||||
|
||||
var newfill float64 = b.Fill + float64(amount)
|
||||
|
||||
if newfill > float64(b.Size) {
|
||||
return false
|
||||
}
|
||||
|
||||
b.Fill = newfill
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// The time at which this bucket will be completely drained
|
||||
func (b *LeakyBucket) DrainedAt() time.Time {
|
||||
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
|
||||
}
|
||||
|
||||
// The duration until this bucket is completely drained
|
||||
func (b *LeakyBucket) TimeToDrain() time.Duration {
|
||||
return b.DrainedAt().Sub(b.Now())
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
|
||||
return b.Now().Sub(b.Lastupdate)
|
||||
}
|
||||
|
||||
type LeakyBucketSer struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
|
||||
bucket := LeakyBucketSer{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
Now: time.Now,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
|
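A minimal sketch of using the leaky bucket on its own, relying only on the constructor and methods defined above; the bucket size and leak interval are arbitrary example values.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// Bucket of 16 units that leaks one unit every 100ms: bursts of up to
	// 16 lines, a sustained rate of about 10 lines per second.
	bucket := ratelimiter.NewLeakyBucket(16, 100*time.Millisecond)

	for i := 0; i < 40; i++ {
		if bucket.Pour(1) {
			fmt.Println("line", i, "accepted")
			continue
		}
		// Bucket is full; back off until it has drained.
		fmt.Println("rate limit hit, draining for", bucket.TimeToDrain())
		time.Sleep(bucket.TimeToDrain())
	}
}
```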
@ -0,0 +1,58 @@
|
|||
package ratelimiter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
const GC_SIZE int = 100
|
||||
|
||||
type Memory struct {
|
||||
store map[string]LeakyBucket
|
||||
lastGCCollected time.Time
|
||||
}
|
||||
|
||||
func NewMemory() *Memory {
|
||||
m := new(Memory)
|
||||
m.store = make(map[string]LeakyBucket)
|
||||
m.lastGCCollected = time.Now()
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
|
||||
|
||||
bucket, ok := m.store[key]
|
||||
if !ok {
|
||||
return nil, errors.New("miss")
|
||||
}
|
||||
|
||||
return &bucket, nil
|
||||
}
|
||||
|
||||
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
|
||||
|
||||
if len(m.store) > GC_SIZE {
|
||||
m.GarbageCollect()
|
||||
}
|
||||
|
||||
m.store[key] = bucket
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Memory) GarbageCollect() {
|
||||
now := time.Now()
|
||||
|
||||
// rate limit GC to once per minute
|
||||
if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
|
||||
|
||||
for key, bucket := range m.store {
|
||||
// if the bucket is drained, then GC
|
||||
if bucket.DrainedAt().Unix() > now.Unix() {
|
||||
delete(m.store, key)
|
||||
}
|
||||
}
|
||||
|
||||
m.lastGCCollected = now
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package ratelimiter
|
||||
|
||||
type Storage interface {
|
||||
GetBucketFor(string) (*LeakyBucket, error)
|
||||
SetBucketFor(string, LeakyBucket) error
|
||||
}
|
|
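The `Storage` interface above ties the in-memory backend to the buckets; below is a short sketch of the get-miss-create-set cycle, using only functions defined in this commit (the key and bucket parameters are illustrative).

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	store := ratelimiter.NewMemory()

	// Look up the caller's bucket; a miss returns an error.
	bucket, err := store.GetBucketFor("client-42")
	if err != nil {
		bucket = ratelimiter.NewLeakyBucket(10, time.Second)
	}

	if bucket.Pour(1) {
		fmt.Println("request allowed")
	}

	// Write the updated fill level back into the store.
	store.SetBucketFor("client-42", *bucket)
}
```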
@ -0,0 +1,419 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/ratelimiter"
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/hpcloud/tail/watch"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrStop = fmt.Errorf("tail should now stop")
|
||||
)
|
||||
|
||||
type Line struct {
|
||||
Text string
|
||||
Time time.Time
|
||||
Err error // Error from tail
|
||||
}
|
||||
|
||||
// NewLine returns a Line with present time.
|
||||
func NewLine(text string) *Line {
|
||||
return &Line{text, time.Now(), nil}
|
||||
}
|
||||
|
||||
// SeekInfo represents arguments to `os.Seek`
|
||||
type SeekInfo struct {
|
||||
Offset int64
|
||||
Whence int // os.SEEK_*
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
Fatal(v ...interface{})
|
||||
Fatalf(format string, v ...interface{})
|
||||
Fatalln(v ...interface{})
|
||||
Panic(v ...interface{})
|
||||
Panicf(format string, v ...interface{})
|
||||
Panicln(v ...interface{})
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// Config is used to specify how a file must be tailed.
|
||||
type Config struct {
|
||||
// File-specific
|
||||
Location *SeekInfo // Seek to this location before tailing
|
||||
ReOpen bool // Reopen recreated files (tail -F)
|
||||
MustExist bool // Fail early if the file does not exist
|
||||
Poll bool // Poll for file changes instead of using inotify
|
||||
Pipe bool // Is a named pipe (mkfifo)
|
||||
RateLimiter *ratelimiter.LeakyBucket
|
||||
|
||||
// Generic IO
|
||||
Follow bool // Continue looking for new lines (tail -f)
|
||||
MaxLineSize int // If non-zero, split longer lines into multiple lines
|
||||
|
||||
// Logger, when nil, is set to tail.DefaultLogger
|
||||
// To disable logging: set field to tail.DiscardingLogger
|
||||
Logger logger
|
||||
}
|
||||
|
||||
type Tail struct {
|
||||
Filename string
|
||||
Lines chan *Line
|
||||
Config
|
||||
|
||||
file *os.File
|
||||
reader *bufio.Reader
|
||||
|
||||
watcher watch.FileWatcher
|
||||
changes *watch.FileChanges
|
||||
|
||||
tomb.Tomb // provides: Done, Kill, Dying
|
||||
|
||||
lk sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultLogger is used when Config.Logger == nil
|
||||
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
// DiscardingLogger can be used to disable logging output
|
||||
DiscardingLogger = log.New(ioutil.Discard, "", 0)
|
||||
)
|
||||
|
||||
// TailFile begins tailing the file. Output stream is made available
|
||||
// via the `Tail.Lines` channel. To handle errors during tailing,
|
||||
// invoke the `Wait` or `Err` method after finishing reading from the
|
||||
// `Lines` channel.
|
||||
func TailFile(filename string, config Config) (*Tail, error) {
|
||||
if config.ReOpen && !config.Follow {
|
||||
util.Fatal("cannot set ReOpen without Follow.")
|
||||
}
|
||||
|
||||
t := &Tail{
|
||||
Filename: filename,
|
||||
Lines: make(chan *Line),
|
||||
Config: config,
|
||||
}
|
||||
|
||||
// when Logger was not specified in config, use default logger
|
||||
if t.Logger == nil {
|
||||
t.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
if t.Poll {
|
||||
t.watcher = watch.NewPollingFileWatcher(filename)
|
||||
} else {
|
||||
t.watcher = watch.NewInotifyFileWatcher(filename)
|
||||
}
|
||||
|
||||
if t.MustExist {
|
||||
var err error
|
||||
t.file, err = OpenFile(t.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go t.tailFileSync()
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Return the file's current position, like stdio's ftell().
|
||||
// But the value is not exact: a line may already have been read into the
// Lines channel, so the reported offset may be one line ahead of what the
// caller has consumed.
|
||||
func (tail *Tail) Tell() (offset int64, err error) {
|
||||
if tail.file == nil {
|
||||
return
|
||||
}
|
||||
offset, err = tail.file.Seek(0, os.SEEK_CUR)
|
||||
if err == nil {
|
||||
tail.lk.Lock()
|
||||
offset -= int64(tail.reader.Buffered())
|
||||
tail.lk.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Stop stops the tailing activity.
|
||||
func (tail *Tail) Stop() error {
|
||||
tail.Kill(nil)
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
func (tail *Tail) close() {
|
||||
close(tail.Lines)
|
||||
tail.colseFile()
|
||||
}
|
||||
|
||||
func (tail *Tail) colseFile() {
|
||||
if tail.file != nil {
|
||||
tail.file.Close()
|
||||
tail.file = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) reopen() error {
|
||||
tail.colseFile()
|
||||
for {
|
||||
var err error
|
||||
tail.file, err = OpenFile(tail.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
|
||||
if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
|
||||
if err == tomb.ErrDying {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tail *Tail) readLine() (string, error) {
|
||||
tail.lk.Lock()
|
||||
line, err := tail.reader.ReadString('\n')
|
||||
tail.lk.Unlock()
|
||||
if err != nil {
|
||||
// Note ReadString "returns the data read before the error" in
|
||||
// case of an error, including EOF, so we return it as is. The
|
||||
// caller is expected to process it if err is EOF.
|
||||
return line, err
|
||||
}
|
||||
|
||||
line = strings.TrimRight(line, "\n")
|
||||
|
||||
return line, err
|
||||
}
|
||||
|
||||
func (tail *Tail) tailFileSync() {
|
||||
defer tail.Done()
|
||||
defer tail.close()
|
||||
|
||||
if !tail.MustExist {
|
||||
// deferred first open.
|
||||
err := tail.reopen()
|
||||
if err != nil {
|
||||
if err != tomb.ErrDying {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Seek to requested location on first open of the file.
|
||||
if tail.Location != nil {
|
||||
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
|
||||
tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
|
||||
if err != nil {
|
||||
tail.Killf("Seek error on %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tail.openReader()
|
||||
|
||||
var offset int64 = 0
|
||||
var err error
|
||||
// Read line by line.
|
||||
for {
|
||||
// do not seek in named pipes
|
||||
if !tail.Pipe {
|
||||
// grab the position in case we need to back up in the event of a half-line
|
||||
offset, err = tail.Tell()
|
||||
if err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
line, err := tail.readLine()
|
||||
|
||||
// Process `line` even if err is EOF.
|
||||
if err == nil {
|
||||
cooloff := !tail.sendLine(line)
|
||||
if cooloff {
|
||||
// Wait a second before seeking till the end of
|
||||
// file when rate limit is reached.
|
||||
msg := fmt.Sprintf(
|
||||
"Too much log activity; waiting a second " +
|
||||
"before resuming tailing")
|
||||
tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-tail.Dying():
|
||||
return
|
||||
}
|
||||
err = tail.seekEnd()
|
||||
if err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if !tail.Follow {
|
||||
if line != "" {
|
||||
tail.sendLine(line)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tail.Follow && line != "" {
|
||||
// this has the potential to never return the last line if
|
||||
// it's not followed by a newline; seems a fair trade here
|
||||
err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
|
||||
if err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// When EOF is reached, wait for more data to become
|
||||
// available. Wait strategy is based on the `tail.watcher`
|
||||
// implementation (inotify or polling).
|
||||
err := tail.waitForChanges()
|
||||
if err != nil {
|
||||
if err != ErrStop {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// non-EOF error
|
||||
tail.Killf("Error reading %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.Dying():
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForChanges waits until the file has been appended, deleted,
|
||||
// moved or truncated. When moved or deleted - the file will be
|
||||
// reopened if ReOpen is true. Truncated files are always reopened.
|
||||
func (tail *Tail) waitForChanges() error {
|
||||
if tail.changes == nil {
|
||||
pos, err := tail.file.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.changes.Modified:
|
||||
return nil
|
||||
case <-tail.changes.Deleted:
|
||||
tail.changes = nil
|
||||
if tail.ReOpen {
|
||||
// XXX: we must not log from a library.
|
||||
tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
} else {
|
||||
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
|
||||
return ErrStop
|
||||
}
|
||||
case <-tail.changes.Truncated:
|
||||
// Always reopen truncated files (Follow is true)
|
||||
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
case <-tail.Dying():
|
||||
return ErrStop
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (tail *Tail) openReader() {
|
||||
if tail.MaxLineSize > 0 {
|
||||
// add 2 to account for newline characters
|
||||
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
|
||||
} else {
|
||||
tail.reader = bufio.NewReader(tail.file)
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) seekEnd() error {
|
||||
return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
|
||||
}
|
||||
|
||||
func (tail *Tail) seekTo(pos SeekInfo) error {
|
||||
_, err := tail.file.Seek(pos.Offset, pos.Whence)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
|
||||
}
|
||||
// Reset the read buffer whenever the file is re-seek'ed
|
||||
tail.reader.Reset(tail.file)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendLine sends the line(s) to Lines channel, splitting longer lines
|
||||
// if necessary. Return false if rate limit is reached.
|
||||
func (tail *Tail) sendLine(line string) bool {
|
||||
now := time.Now()
|
||||
lines := []string{line}
|
||||
|
||||
// Split longer lines
|
||||
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
|
||||
lines = util.PartitionString(line, tail.MaxLineSize)
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
tail.Lines <- &Line{line, now, nil}
|
||||
}
|
||||
|
||||
if tail.Config.RateLimiter != nil {
|
||||
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
|
||||
if !ok {
|
||||
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
|
||||
tail.Filename)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Cleanup removes inotify watches added by the tail package. This function is
|
||||
// meant to be invoked from a process's exit handler. Linux kernel may not
|
||||
// automatically remove inotify watches after the process exits.
|
||||
func (tail *Tail) Cleanup() {
|
||||
watch.Cleanup(tail.Filename)
|
||||
}
|
|
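Putting the two packages together: the sketch below wires a leaky bucket into `Config.RateLimiter` so that `sendLine` enters the one-second cooloff described above when log activity spikes. Rate-limit notices arrive on `Lines` with a non-nil `Err`. The path and bucket parameters are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail"
	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// Roughly 100 lines/second sustained, with bursts of up to 100 lines.
	cfg := tail.Config{
		Follow:      true,
		RateLimiter: ratelimiter.NewLeakyBucket(100, 10*time.Millisecond),
	}

	t, err := tail.TailFile("/var/log/noisy.log", cfg)
	if err != nil {
		panic(err)
	}

	for line := range t.Lines {
		if line.Err != nil {
			// "Too much log activity" notice emitted during cooloff.
			fmt.Println("tail:", line.Err)
			continue
		}
		fmt.Println(line.Text)
	}
}
```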
@ -0,0 +1,11 @@
|
|||
// +build linux darwin freebsd netbsd openbsd
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return os.Open(name)
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
// +build windows
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"github.com/hpcloud/tail/winfile"
|
||||
"os"
|
||||
)
|
||||
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return winfile.OpenFile(name, os.O_RDONLY, 0)
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
*log.Logger
|
||||
}
|
||||
|
||||
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
// fatal is like panic except it displays only the current goroutine's stack.
|
||||
func Fatal(format string, v ...interface{}) {
|
||||
// https://github.com/ActiveState/log/blob/master/log.go#L45
|
||||
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// partitionString partitions the string into chunks of given size,
|
||||
// with the last chunk of variable size.
|
||||
func PartitionString(s string, chunkSize int) []string {
|
||||
if chunkSize <= 0 {
|
||||
panic("invalid chunkSize")
|
||||
}
|
||||
length := len(s)
|
||||
chunks := 1 + length/chunkSize
|
||||
start := 0
|
||||
end := chunkSize
|
||||
parts := make([]string, 0, chunks)
|
||||
for {
|
||||
if end > length {
|
||||
end = length
|
||||
}
|
||||
parts = append(parts, s[start:end])
|
||||
if end == length {
|
||||
break
|
||||
}
|
||||
start, end = end, end+chunkSize
|
||||
}
|
||||
return parts
|
||||
}
|
|
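A worked example of `PartitionString` from the util package above: every chunk has the requested size except the final one, which takes whatever is left.

```go
package main

import (
	"fmt"

	"github.com/hpcloud/tail/util"
)

func main() {
	// 11 bytes split into chunks of 4: the last chunk is 3 bytes long.
	fmt.Println(util.PartitionString("hello world", 4))
	// Output: [hell o wo rld]
}
```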
@ -0,0 +1,42 @@
|
|||
package watch
|
||||
|
||||
type FileChanges struct {
|
||||
Modified chan bool // Channel to get notified of modifications
|
||||
Truncated chan bool // Channel to get notified of truncations
|
||||
Deleted chan bool // Channel to get notified of deletions/renames
|
||||
}
|
||||
|
||||
func NewFileChanges() *FileChanges {
|
||||
return &FileChanges{
|
||||
make(chan bool), make(chan bool), make(chan bool)}
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyModified() {
|
||||
sendOnlyIfEmpty(fc.Modified)
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyTruncated() {
|
||||
sendOnlyIfEmpty(fc.Truncated)
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyDeleted() {
|
||||
sendOnlyIfEmpty(fc.Deleted)
|
||||
}
|
||||
|
||||
func (fc *FileChanges) Close() {
|
||||
close(fc.Modified)
|
||||
close(fc.Truncated)
|
||||
close(fc.Deleted)
|
||||
}
|
||||
|
||||
// sendOnlyIfEmpty sends on a bool channel only if the channel has no
|
||||
// backlog to be read by other goroutines. This concurrency pattern
|
||||
// can be used to notify other goroutines if and only if they are
|
||||
// looking for it (i.e., subsequent notifications can be compressed
|
||||
// into one).
|
||||
func sendOnlyIfEmpty(ch chan bool) {
|
||||
select {
|
||||
case ch <- true:
|
||||
default:
|
||||
}
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// InotifyFileWatcher uses inotify to monitor file changes.
|
||||
type InotifyFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
|
||||
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
err := WatchCreate(fw.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer RemoveWatchCreate(fw.Filename)
|
||||
|
||||
// Do a real check now as the file might have been created before
|
||||
// calling `WatchFlags` above.
|
||||
if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
|
||||
// file exists, or stat returned an error.
|
||||
return err
|
||||
}
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt, ok := <-events:
|
||||
if !ok {
|
||||
return fmt.Errorf("inotify watcher has been closed")
|
||||
} else if filepath.Clean(evt.Name) == fw.Filename {
|
||||
return nil
|
||||
}
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
err := Watch(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
defer RemoveWatch(fw.Filename)
|
||||
defer changes.Close()
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
prevSize := fw.Size
|
||||
|
||||
var evt fsnotify.Event
|
||||
var ok bool
|
||||
|
||||
select {
|
||||
case evt, ok = <-events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-t.Dying():
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case evt.Op&fsnotify.Remove == fsnotify.Remove:
|
||||
fallthrough
|
||||
|
||||
case evt.Op&fsnotify.Rename == fsnotify.Rename:
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
|
||||
case evt.Op&fsnotify.Write == fsnotify.Write:
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
fw.Size = fi.Size()
|
||||
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
} else {
|
||||
changes.NotifyModified()
|
||||
}
|
||||
prevSize = fw.Size
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
|
@ -0,0 +1,254 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
)
|
||||
|
||||
type InotifyTracker struct {
|
||||
mux sync.Mutex
|
||||
watcher *fsnotify.Watcher
|
||||
chans map[string]chan fsnotify.Event
|
||||
done map[string]chan bool
|
||||
watchNums map[string]int
|
||||
watch chan *watchInfo
|
||||
remove chan *watchInfo
|
||||
error chan error
|
||||
}
|
||||
|
||||
type watchInfo struct {
|
||||
op fsnotify.Op
|
||||
fname string
|
||||
}
|
||||
|
||||
func (this *watchInfo) isCreate() bool {
|
||||
return this.op == fsnotify.Create
|
||||
}
|
||||
|
||||
var (
|
||||
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
|
||||
shared *InotifyTracker
|
||||
|
||||
// these are used to ensure the shared InotifyTracker is run exactly once
|
||||
once = sync.Once{}
|
||||
goRun = func() {
|
||||
shared = &InotifyTracker{
|
||||
mux: sync.Mutex{},
|
||||
chans: make(map[string]chan fsnotify.Event),
|
||||
done: make(map[string]chan bool),
|
||||
watchNums: make(map[string]int),
|
||||
watch: make(chan *watchInfo),
|
||||
remove: make(chan *watchInfo),
|
||||
error: make(chan error),
|
||||
}
|
||||
go shared.run()
|
||||
}
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
// Watch signals the run goroutine to begin watching the input filename
|
||||
func Watch(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// WatchCreate signals the run goroutine to begin watching for creation of the
// input filename. If WatchCreate is used, remove the watch with
// RemoveWatchCreate rather than Cleanup.
|
||||
func WatchCreate(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func watch(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.watch <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// RemoveWatch signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatch(fname string) {
|
||||
remove(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename
|
||||
func RemoveWatchCreate(fname string) {
|
||||
remove(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func remove(winfo *watchInfo) {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.mux.Lock()
|
||||
done := shared.done[winfo.fname]
|
||||
if done != nil {
|
||||
delete(shared.done, winfo.fname)
|
||||
close(done)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
shared.remove <- winfo
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) {
|
||||
RemoveWatch(fname)
|
||||
}
|
||||
|
||||
// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
|
||||
// a new Watcher if the previous Watcher was closed.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] > 0 {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := shared.watcher.Add(fname)
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
|
||||
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch == nil {
|
||||
return
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
shared.watchNums[fname]--
|
||||
if shared.watchNums[fname] == 0 {
|
||||
delete(shared.watchNums, fname)
|
||||
// TODO: handle error
|
||||
shared.watcher.Remove(fname)
|
||||
}
|
||||
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
|
||||
if !winfo.isCreate() {
|
||||
return
|
||||
}
|
||||
|
||||
shared.watchNums[winfo.fname]--
|
||||
if shared.watchNums[winfo.fname] == 0 {
|
||||
delete(shared.watchNums, winfo.fname)
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent sends the input event to the appropriate Tail.
|
||||
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
|
||||
name := filepath.Clean(event.Name)
|
||||
|
||||
shared.mux.Lock()
|
||||
ch := shared.chans[name]
|
||||
done := shared.done[name]
|
||||
shared.mux.Unlock()
|
||||
|
||||
if ch != nil && done != nil {
|
||||
select {
|
||||
case ch <- event:
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run starts the goroutine in which the shared struct reads events from its
|
||||
// Watcher's Event channel and sends the events to the appropriate Tail.
|
||||
func (shared *InotifyTracker) run() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
util.Fatal("failed to create Watcher")
|
||||
}
|
||||
shared.watcher = watcher
|
||||
|
||||
for {
|
||||
select {
|
||||
case winfo := <-shared.watch:
|
||||
shared.error <- shared.addWatch(winfo)
|
||||
|
||||
case winfo := <-shared.remove:
|
||||
shared.removeWatch(winfo)
|
||||
|
||||
case event, open := <-shared.watcher.Events:
|
||||
if !open {
|
||||
return
|
||||
}
|
||||
shared.sendEvent(event)
|
||||
|
||||
case err, open := <-shared.watcher.Errors:
|
||||
if !open {
|
||||
return
|
||||
} else if err != nil {
|
||||
sysErr, ok := err.(*os.SyscallError)
|
||||
if !ok || sysErr.Err != syscall.EINTR {
|
||||
logger.Printf("Error in Watcher Error channel: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// PollingFileWatcher polls the file for changes.
|
||||
type PollingFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
|
||||
fw := &PollingFileWatcher{filename, 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
var POLL_DURATION time.Duration
|
||||
|
||||
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
for {
|
||||
if _, err := os.Stat(fw.Filename); err == nil {
|
||||
return nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-time.After(POLL_DURATION):
|
||||
continue
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
origFi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
var prevModTime time.Time
|
||||
|
||||
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
|
||||
// the fatal (below) with tomb's Kill.
|
||||
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
defer changes.Close()
|
||||
|
||||
prevSize := fw.Size
|
||||
for {
|
||||
select {
|
||||
case <-t.Dying():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
time.Sleep(POLL_DURATION)
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
// Windows cannot delete a file if a handle is still open (tail keeps one open)
|
||||
// so it gives access denied to anything trying to read it until all handles are released.
|
||||
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
|
||||
// File does not exist (has been deleted).
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
|
||||
// File got moved/renamed?
|
||||
if !os.SameFile(origFi, fi) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// File got truncated?
|
||||
fw.Size = fi.Size()
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
// File got bigger?
|
||||
if prevSize > 0 && prevSize < fw.Size {
|
||||
changes.NotifyModified()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
prevSize = fw.Size
|
||||
|
||||
// File was appended to (changed)?
|
||||
modTime := fi.ModTime()
|
||||
if modTime != prevModTime {
|
||||
prevModTime = modTime
|
||||
changes.NotifyModified()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
POLL_DURATION = 250 * time.Millisecond
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import "gopkg.in/tomb.v1"
|
||||
|
||||
// FileWatcher monitors file-level events.
|
||||
type FileWatcher interface {
|
||||
// BlockUntilExists blocks until the file comes into existence.
|
||||
BlockUntilExists(*tomb.Tomb) error
|
||||
|
||||
// ChangeEvents reports on changes to a file, be it modification,
|
||||
// deletion, renames or truncations. Returned FileChanges group of
|
||||
// channels will be closed, thus become unusable, after a deletion
|
||||
// or truncation event.
|
||||
// In order to properly report truncations, ChangeEvents requires
|
||||
// the caller to pass their current offset in the file.
|
||||
ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
|
||||
}
|
|
@ -0,0 +1,92 @@
|
|||
// +build windows
|
||||
|
||||
package winfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// issue also described here
|
||||
//https://codereview.appspot.com/8203043/
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
|
||||
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
|
||||
if len(path) == 0 {
|
||||
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
|
||||
}
|
||||
pathp, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return syscall.InvalidHandle, err
|
||||
}
|
||||
var access uint32
|
||||
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
|
||||
case syscall.O_RDONLY:
|
||||
access = syscall.GENERIC_READ
|
||||
case syscall.O_WRONLY:
|
||||
access = syscall.GENERIC_WRITE
|
||||
case syscall.O_RDWR:
|
||||
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_CREAT != 0 {
|
||||
access |= syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_APPEND != 0 {
|
||||
access &^= syscall.GENERIC_WRITE
|
||||
access |= syscall.FILE_APPEND_DATA
|
||||
}
|
||||
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
|
||||
var sa *syscall.SecurityAttributes
|
||||
if mode&syscall.O_CLOEXEC == 0 {
|
||||
sa = makeInheritSa()
|
||||
}
|
||||
var createmode uint32
|
||||
switch {
|
||||
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
|
||||
createmode = syscall.CREATE_NEW
|
||||
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
|
||||
createmode = syscall.CREATE_ALWAYS
|
||||
case mode&syscall.O_CREAT == syscall.O_CREAT:
|
||||
createmode = syscall.OPEN_ALWAYS
|
||||
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
|
||||
createmode = syscall.TRUNCATE_EXISTING
|
||||
default:
|
||||
createmode = syscall.OPEN_EXISTING
|
||||
}
|
||||
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
return h, e
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
|
||||
func makeInheritSa() *syscall.SecurityAttributes {
|
||||
var sa syscall.SecurityAttributes
|
||||
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||
sa.InheritHandle = 1
|
||||
return &sa
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
|
||||
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
|
||||
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return os.NewFile(uintptr(r), name), nil
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
|
||||
func syscallMode(i os.FileMode) (o uint32) {
|
||||
o |= uint32(i.Perm())
|
||||
if i&os.ModeSetuid != 0 {
|
||||
o |= syscall.S_ISUID
|
||||
}
|
||||
if i&os.ModeSetgid != 0 {
|
||||
o |= syscall.S_ISGID
|
||||
}
|
||||
if i&os.ModeSticky != 0 {
|
||||
o |= syscall.S_ISVTX
|
||||
}
|
||||
// No mapping for Go's ModeTemporary (plan9 only).
|
||||
return
|
||||
}
|
|
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
@@ -0,0 +1,3 @@
language: go
go:
  - tip
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,38 @@
go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
=========

Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.

As a radix tree, it provides the following:
 * O(k) operations. In many cases, this can be faster than a hash table since
   the hash function is an O(k) operation, and hash tables have very poor cache locality.
 * Minimum / Maximum value lookups
 * Ordered iteration

For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).

Documentation
=============

The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).

Example
=======

Below is a simple example of usage

```go
// Create a tree
r := radix.New()
r.Insert("foo", 1)
r.Insert("bar", 2)
r.Insert("foobar", 2)

// Find the longest prefix match
m, _, _ := r.LongestPrefix("foozip")
if m != "foo" {
    panic("should be foo")
}
```
@@ -0,0 +1,496 @@
|
|||
package radix
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
|
||||
// key and value, returning if iteration should
|
||||
// be terminated.
|
||||
type WalkFn func(s string, v interface{}) bool
|
||||
|
||||
// leafNode is used to represent a value
|
||||
type leafNode struct {
|
||||
key string
|
||||
val interface{}
|
||||
}
|
||||
|
||||
// edge is used to represent an edge node
|
||||
type edge struct {
|
||||
label byte
|
||||
node *node
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// leaf is used to store possible leaf
|
||||
leaf *leafNode
|
||||
|
||||
// prefix is the common prefix we ignore
|
||||
prefix string
|
||||
|
||||
// Edges should be stored in-order for iteration.
|
||||
// We avoid a fully materialized slice to save memory,
|
||||
// since in most cases we expect to be sparse
|
||||
edges edges
|
||||
}
|
||||
|
||||
func (n *node) isLeaf() bool {
|
||||
return n.leaf != nil
|
||||
}
|
||||
|
||||
func (n *node) addEdge(e edge) {
|
||||
n.edges = append(n.edges, e)
|
||||
n.edges.Sort()
|
||||
}
|
||||
|
||||
func (n *node) replaceEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == e.label {
|
||||
n.edges[idx].node = e.node
|
||||
return
|
||||
}
|
||||
panic("replacing missing edge")
|
||||
}
|
||||
|
||||
func (n *node) getEdge(label byte) *node {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
return n.edges[idx].node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *node) delEdge(label byte) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
copy(n.edges[idx:], n.edges[idx+1:])
|
||||
n.edges[len(n.edges)-1] = edge{}
|
||||
n.edges = n.edges[:len(n.edges)-1]
|
||||
}
|
||||
}
|
||||
|
||||
type edges []edge
|
||||
|
||||
func (e edges) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e edges) Less(i, j int) bool {
|
||||
return e[i].label < e[j].label
|
||||
}
|
||||
|
||||
func (e edges) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e edges) Sort() {
|
||||
sort.Sort(e)
|
||||
}
|
||||
|
||||
// Tree implements a radix tree. This can be treated as a
|
||||
// Dictionary abstract data type. The main advantage over
|
||||
// a standard hash map is prefix-based lookups and
|
||||
// ordered iteration,
|
||||
type Tree struct {
|
||||
root *node
|
||||
size int
|
||||
}
|
||||
|
||||
// New returns an empty Tree
|
||||
func New() *Tree {
|
||||
return NewFromMap(nil)
|
||||
}
|
||||
|
||||
// NewFromMap returns a new tree containing the keys
|
||||
// from an existing map
|
||||
func NewFromMap(m map[string]interface{}) *Tree {
|
||||
t := &Tree{root: &node{}}
|
||||
for k, v := range m {
|
||||
t.Insert(k, v)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Len is used to return the number of elements in the tree
|
||||
func (t *Tree) Len() int {
|
||||
return t.size
|
||||
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
|
||||
// of two strings
|
||||
func longestPrefix(k1, k2 string) int {
|
||||
max := len(k1)
|
||||
if l := len(k2); l < max {
|
||||
max = l
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < max; i++ {
|
||||
if k1[i] != k2[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// Insert is used to add a newentry or update
|
||||
// an existing entry. Returns if updated.
|
||||
func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
|
||||
var parent *node
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Handle key exhaution
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
old := n.leaf.val
|
||||
n.leaf.val = v
|
||||
return old, true
|
||||
}
|
||||
|
||||
n.leaf = &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
}
|
||||
t.size++
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Look for the edge
|
||||
parent = n
|
||||
n = n.getEdge(search[0])
|
||||
|
||||
// No edge, create one
|
||||
if n == nil {
|
||||
e := edge{
|
||||
label: search[0],
|
||||
node: &node{
|
||||
leaf: &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
},
|
||||
prefix: search,
|
||||
},
|
||||
}
|
||||
parent.addEdge(e)
|
||||
t.size++
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Determine longest prefix of the search key on match
|
||||
commonPrefix := longestPrefix(search, n.prefix)
|
||||
if commonPrefix == len(n.prefix) {
|
||||
search = search[commonPrefix:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Split the node
|
||||
t.size++
|
||||
child := &node{
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
parent.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: child,
|
||||
})
|
||||
|
||||
// Restore the existing node
|
||||
child.addEdge(edge{
|
||||
label: n.prefix[commonPrefix],
|
||||
node: n,
|
||||
})
|
||||
n.prefix = n.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add to to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
child.leaf = leaf
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
child.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &node{
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// Delete is used to delete a key, returning the previous
|
||||
// value and if it was deleted
|
||||
func (t *Tree) Delete(s string) (interface{}, bool) {
|
||||
var parent *node
|
||||
var label byte
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
if !n.isLeaf() {
|
||||
break
|
||||
}
|
||||
goto DELETE
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
parent = n
|
||||
label = search[0]
|
||||
n = n.getEdge(label)
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
|
||||
DELETE:
|
||||
// Delete the leaf
|
||||
leaf := n.leaf
|
||||
n.leaf = nil
|
||||
t.size--
|
||||
|
||||
// Check if we should delete this node from the parent
|
||||
if parent != nil && len(n.edges) == 0 {
|
||||
parent.delEdge(label)
|
||||
}
|
||||
|
||||
// Check if we should merge this node
|
||||
if n != t.root && len(n.edges) == 1 {
|
||||
n.mergeChild()
|
||||
}
|
||||
|
||||
// Check if we should merge the parent's other child
|
||||
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
|
||||
parent.mergeChild()
|
||||
}
|
||||
|
||||
return leaf.val, true
|
||||
}
|
||||
|
||||
func (n *node) mergeChild() {
|
||||
e := n.edges[0]
|
||||
child := e.node
|
||||
n.prefix = n.prefix + child.prefix
|
||||
n.leaf = child.leaf
|
||||
n.edges = child.edges
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Tree) Get(s string) (interface{}, bool) {
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LongestPrefix is like Get, but instead of an
|
||||
// exact match, it will return the longest prefix match.
|
||||
func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
|
||||
var last *leafNode
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Look for a leaf node
|
||||
if n.isLeaf() {
|
||||
last = n.leaf
|
||||
}
|
||||
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if last != nil {
|
||||
return last.key, last.val, true
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Minimum is used to return the minimum value in the tree
|
||||
func (t *Tree) Minimum() (string, interface{}, bool) {
|
||||
n := t.root
|
||||
for {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
if len(n.edges) > 0 {
|
||||
n = n.edges[0].node
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Maximum is used to return the maximum value in the tree
|
||||
func (t *Tree) Maximum() (string, interface{}, bool) {
|
||||
n := t.root
|
||||
for {
|
||||
if num := len(n.edges); num > 0 {
|
||||
n = n.edges[num-1].node
|
||||
continue
|
||||
}
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Walk is used to walk the tree
|
||||
func (t *Tree) Walk(fn WalkFn) {
|
||||
recursiveWalk(t.root, fn)
|
||||
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix
|
||||
func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
|
||||
n := t.root
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if strings.HasPrefix(n.prefix, search) {
|
||||
// Child may be under our search prefix
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
|
||||
// from the root down to a given leaf. Where WalkPrefix walks
|
||||
// all the entries *under* the given prefix, this walks the
|
||||
// entries *above* the given prefix.
|
||||
func (t *Tree) WalkPath(path string, fn WalkFn) {
|
||||
n := t.root
|
||||
search := path
|
||||
for {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk(n *node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
if recursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ToMap is used to walk the tree and convert it into a map
|
||||
func (t *Tree) ToMap() map[string]interface{} {
|
||||
out := make(map[string]interface{}, t.size)
|
||||
t.Walk(func(k string, v interface{}) bool {
|
||||
out[k] = v
|
||||
return false
|
||||
})
|
||||
return out
|
||||
}
|
|
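As a small supplement to the README example further up, a sketch of the prefix-oriented calls defined in radix.go above. The keys and values are illustrative only, and it assumes `radix` and `fmt` are imported:

```go
r := radix.New()
r.Insert("com.example.a", 1)
r.Insert("com.example.b", 2)
r.Insert("org.example", 3)

// Visit only the keys under a common prefix, in sorted order.
r.WalkPrefix("com.example", func(k string, v interface{}) bool {
    fmt.Println(k, v)
    return false // keep walking
})

// Remove a key and inspect the previous value.
if old, ok := r.Delete("org.example"); ok {
    fmt.Println("deleted", old)
}

// Minimum / Maximum honor the byte-wise ordering of the keys.
min, _, _ := r.Minimum()
max, _, _ := r.Maximum()
fmt.Println(min, max)
```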
@@ -0,0 +1,311 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
// UseServiceDefaultRetries instructs the config to use the service's own default
|
||||
// number of retries. This will be the default action if Config.MaxRetries
|
||||
// is nil also.
|
||||
const UseServiceDefaultRetries = -1
|
||||
|
||||
// RequestRetryer is an alias for a type that implements the request.Retryer interface.
|
||||
type RequestRetryer interface{}
|
||||
|
||||
// A Config provides service configuration for service clients. By default,
|
||||
// all clients will use the {defaults.DefaultConfig} structure.
|
||||
type Config struct {
|
||||
// Enables verbose error printing of all credential chain errors.
|
||||
// Should be used when wanting to see all errors while attempting to retreive
|
||||
// credentials.
|
||||
CredentialsChainVerboseErrors *bool
|
||||
|
||||
// The credentials object to use when signing requests. Defaults to
|
||||
// a chain of credential providers to search for credentials in environment
|
||||
// variables, shared credential file, and EC2 Instance Roles.
|
||||
Credentials *credentials.Credentials
|
||||
|
||||
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||
// that overrides the default generated endpoint for a client. Set this
|
||||
// to `""` to use the default generated endpoint.
|
||||
//
|
||||
// @note You must still provide a `Region` value when specifying an
|
||||
// endpoint for a client.
|
||||
Endpoint *string
|
||||
|
||||
// The region to send requests to. This parameter is required and must
|
||||
// be configured globally or on a per-client basis unless otherwise
|
||||
// noted. A full list of regions is found in the "Regions and Endpoints"
|
||||
// document.
|
||||
//
|
||||
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
// AWS Regions and Endpoints
|
||||
Region *string
|
||||
|
||||
// Set this to `true` to disable SSL when sending requests. Defaults
|
||||
// to `false`.
|
||||
DisableSSL *bool
|
||||
|
||||
// The HTTP client to use when sending requests. Defaults to
|
||||
// `http.DefaultClient`.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// An integer value representing the logging level. The default log level
|
||||
// is zero (LogOff), which represents no logging. To enable logging set
|
||||
// to a LogLevel Value.
|
||||
LogLevel *LogLevelType
|
||||
|
||||
// The logger writer interface to write logging messages to. Defaults to
|
||||
// standard out.
|
||||
Logger Logger
|
||||
|
||||
// The maximum number of times that a request will be retried for failures.
|
||||
// Defaults to -1, which defers the max retry setting to the service specific
|
||||
// configuration.
|
||||
MaxRetries *int
|
||||
|
||||
// Retryer guides how HTTP requests should be retried in case of recoverable failures.
|
||||
//
|
||||
// When nil or the value does not implement the request.Retryer interface,
|
||||
// the request.DefaultRetryer will be used.
|
||||
//
|
||||
// When both Retryer and MaxRetries are non-nil, the former is used and
|
||||
// the latter ignored.
|
||||
//
|
||||
// To set the Retryer field in a type-safe manner and with chaining, use
|
||||
// the request.WithRetryer helper function:
|
||||
//
|
||||
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
|
||||
//
|
||||
Retryer RequestRetryer
|
||||
|
||||
// Disables semantic parameter validation, which validates input for missing
|
||||
// required fields and/or other semantic request input errors.
|
||||
DisableParamValidation *bool
|
||||
|
||||
// Disables the computation of request and response checksums, e.g.,
|
||||
// CRC32 checksums in Amazon DynamoDB.
|
||||
DisableComputeChecksums *bool
|
||||
|
||||
// Set this to `true` to force the request to use path-style addressing,
|
||||
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
|
||||
// use virtual hosted bucket addressing when possible
|
||||
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
||||
//
|
||||
// @note This configuration option is specific to the Amazon S3 service.
|
||||
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
||||
// Amazon S3: Virtual Hosting of Buckets
|
||||
S3ForcePathStyle *bool
|
||||
|
||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||
// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
|
||||
// client to create a new http.Client. This options is only meaningful if you're not
|
||||
// already using a custom HTTP client with the SDK. Enabled by default.
|
||||
//
|
||||
// Must be set and provided to the session.New() in order to disable the EC2Metadata
|
||||
// overriding the timeout for default credentials chain.
|
||||
//
|
||||
// Example:
|
||||
// sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
|
||||
// svc := s3.New(sess)
|
||||
//
|
||||
EC2MetadataDisableTimeoutOverride *bool
|
||||
|
||||
SleepDelay func(time.Duration)
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder methods to
|
||||
// set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
|
||||
//
|
||||
func NewConfig() *Config {
|
||||
return &Config{}
|
||||
}
|
||||
|
||||
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
||||
// a Config pointer.
|
||||
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
||||
c.CredentialsChainVerboseErrors = &verboseErrs
|
||||
return c
|
||||
}
|
||||
|
||||
// WithCredentials sets a config Credentials value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
||||
c.Credentials = creds
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||
c.Endpoint = &endpoint
|
||||
return c
|
||||
}
|
||||
|
||||
// WithRegion sets a config Region value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithRegion(region string) *Config {
|
||||
c.Region = ®ion
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||
c.DisableSSL = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
||||
c.HTTPClient = client
|
||||
return c
|
||||
}
|
||||
|
||||
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithMaxRetries(max int) *Config {
|
||||
c.MaxRetries = &max
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||
c.DisableParamValidation = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||
c.DisableComputeChecksums = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||
c.LogLevel = &level
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogger sets a config Logger value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogger(logger Logger) *Config {
|
||||
c.Logger = logger
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||
c.S3ForcePathStyle = &force
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||
c.EC2MetadataDisableTimeoutOverride = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithSleepDelay overrides the function used to sleep while waiting for the
|
||||
// next retry. Defaults to time.Sleep.
|
||||
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
||||
c.SleepDelay = fn
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeIn merges the passed in configs into the existing config object.
|
||||
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||
for _, other := range cfgs {
|
||||
mergeInConfig(c, other)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeInConfig(dst *Config, other *Config) {
|
||||
if other == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if other.CredentialsChainVerboseErrors != nil {
|
||||
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
||||
}
|
||||
|
||||
if other.Credentials != nil {
|
||||
dst.Credentials = other.Credentials
|
||||
}
|
||||
|
||||
if other.Endpoint != nil {
|
||||
dst.Endpoint = other.Endpoint
|
||||
}
|
||||
|
||||
if other.Region != nil {
|
||||
dst.Region = other.Region
|
||||
}
|
||||
|
||||
if other.DisableSSL != nil {
|
||||
dst.DisableSSL = other.DisableSSL
|
||||
}
|
||||
|
||||
if other.HTTPClient != nil {
|
||||
dst.HTTPClient = other.HTTPClient
|
||||
}
|
||||
|
||||
if other.LogLevel != nil {
|
||||
dst.LogLevel = other.LogLevel
|
||||
}
|
||||
|
||||
if other.Logger != nil {
|
||||
dst.Logger = other.Logger
|
||||
}
|
||||
|
||||
if other.MaxRetries != nil {
|
||||
dst.MaxRetries = other.MaxRetries
|
||||
}
|
||||
|
||||
if other.Retryer != nil {
|
||||
dst.Retryer = other.Retryer
|
||||
}
|
||||
|
||||
if other.DisableParamValidation != nil {
|
||||
dst.DisableParamValidation = other.DisableParamValidation
|
||||
}
|
||||
|
||||
if other.DisableComputeChecksums != nil {
|
||||
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||
}
|
||||
|
||||
if other.S3ForcePathStyle != nil {
|
||||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||
}
|
||||
|
||||
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||
}
|
||||
|
||||
if other.SleepDelay != nil {
|
||||
dst.SleepDelay = other.SleepDelay
|
||||
}
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object. If any additional
|
||||
// configurations are provided they will be merged into the new config returned.
|
||||
func (c *Config) Copy(cfgs ...*Config) *Config {
|
||||
dst := &Config{}
|
||||
dst.MergeIn(c)
|
||||
|
||||
for _, cfg := range cfgs {
|
||||
dst.MergeIn(cfg)
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
|
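To tie the Config doc comments above together, a brief sketch of chaining the setters and layering per-call overrides with Copy. The region, retry count, and log level are placeholders, and it assumes the vendored `aws` package and `fmt` are imported:

```go
base := aws.NewConfig().
    WithRegion("us-west-2").
    WithMaxRetries(3).
    WithLogLevel(aws.LogDebug)

// Copy returns a new Config with the override merged in; nil fields in the
// override leave the base values untouched.
perCall := base.Copy(aws.NewConfig().WithDisableSSL(true))

fmt.Println(aws.StringValue(perCall.Region)) // "us-west-2"
fmt.Println(aws.BoolValue(perCall.DisableSSL)) // true
```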
@@ -0,0 +1,357 @@
|
|||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// String returns a pointer to of the string value passed in.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// StringValue returns the value of the string pointer passed in or
|
||||
// "" if the pointer is nil.
|
||||
func StringValue(v *string) string {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// StringSlice converts a slice of string values into a slice of
|
||||
// string pointers
|
||||
func StringSlice(src []string) []*string {
|
||||
dst := make([]*string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueSlice converts a slice of string pointers into a slice of
|
||||
// string values
|
||||
func StringValueSlice(src []*string) []string {
|
||||
dst := make([]string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringMap converts a string map of string values into a string
|
||||
// map of string pointers
|
||||
func StringMap(src map[string]string) map[string]*string {
|
||||
dst := make(map[string]*string)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueMap converts a string map of string pointers into a string
|
||||
// map of string values
|
||||
func StringValueMap(src map[string]*string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Bool returns a pointer to of the bool value passed in.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// BoolValue returns the value of the bool pointer passed in or
|
||||
// false if the pointer is nil.
|
||||
func BoolValue(v *bool) bool {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BoolSlice converts a slice of bool values into a slice of
|
||||
// bool pointers
|
||||
func BoolSlice(src []bool) []*bool {
|
||||
dst := make([]*bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueSlice converts a slice of bool pointers into a slice of
|
||||
// bool values
|
||||
func BoolValueSlice(src []*bool) []bool {
|
||||
dst := make([]bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolMap converts a string map of bool values into a string
|
||||
// map of bool pointers
|
||||
func BoolMap(src map[string]bool) map[string]*bool {
|
||||
dst := make(map[string]*bool)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueMap converts a string map of bool pointers into a string
|
||||
// map of bool values
|
||||
func BoolValueMap(src map[string]*bool) map[string]bool {
|
||||
dst := make(map[string]bool)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int returns a pointer to of the int value passed in.
|
||||
func Int(v int) *int {
|
||||
return &v
|
||||
}
|
||||
|
||||
// IntValue returns the value of the int pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func IntValue(v *int) int {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IntSlice converts a slice of int values into a slice of
|
||||
// int pointers
|
||||
func IntSlice(src []int) []*int {
|
||||
dst := make([]*int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueSlice converts a slice of int pointers into a slice of
|
||||
// int values
|
||||
func IntValueSlice(src []*int) []int {
|
||||
dst := make([]int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntMap converts a string map of int values into a string
|
||||
// map of int pointers
|
||||
func IntMap(src map[string]int) map[string]*int {
|
||||
dst := make(map[string]*int)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueMap converts a string map of int pointers into a string
|
||||
// map of int values
|
||||
func IntValueMap(src map[string]*int) map[string]int {
|
||||
dst := make(map[string]int)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to of the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int64Value returns the value of the int64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int64Value(v *int64) int64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int64Slice converts a slice of int64 values into a slice of
|
||||
// int64 pointers
|
||||
func Int64Slice(src []int64) []*int64 {
|
||||
dst := make([]*int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueSlice converts a slice of int64 pointers into a slice of
|
||||
// int64 values
|
||||
func Int64ValueSlice(src []*int64) []int64 {
|
||||
dst := make([]int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64Map converts a string map of int64 values into a string
|
||||
// map of int64 pointers
|
||||
func Int64Map(src map[string]int64) map[string]*int64 {
|
||||
dst := make(map[string]*int64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueMap converts a string map of int64 pointers into a string
|
||||
// map of int64 values
|
||||
func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
||||
dst := make(map[string]int64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to of the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64Value returns the value of the float64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float64Value(v *float64) float64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float64Slice converts a slice of float64 values into a slice of
|
||||
// float64 pointers
|
||||
func Float64Slice(src []float64) []*float64 {
|
||||
dst := make([]*float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueSlice converts a slice of float64 pointers into a slice of
|
||||
// float64 values
|
||||
func Float64ValueSlice(src []*float64) []float64 {
|
||||
dst := make([]float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64Map converts a string map of float64 values into a string
|
||||
// map of float64 pointers
|
||||
func Float64Map(src map[string]float64) map[string]*float64 {
|
||||
dst := make(map[string]*float64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueMap converts a string map of float64 pointers into a string
|
||||
// map of float64 values
|
||||
func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
||||
dst := make(map[string]float64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Time returns a pointer to of the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
||||
// TimeValue returns the value of the time.Time pointer passed in or
|
||||
// time.Time{} if the pointer is nil.
|
||||
func TimeValue(v *time.Time) time.Time {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
dst := make([]*time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||
// time.Time values
|
||||
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||
dst := make([]time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeMap converts a string map of time.Time values into a string
|
||||
// map of time.Time pointers
|
||||
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||
dst := make(map[string]*time.Time)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||
// map of time.Time values
|
||||
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||
dst := make(map[string]time.Time)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
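A short sketch of the pointer-conversion helpers defined above, which exist mainly so literals can be assigned to the SDK's pointer-typed struct fields. It assumes `aws`, `fmt`, and `time` are imported:

```go
// Pointerize literals for API input structs.
name := aws.String("packer-build")
count := aws.Int64(2)
when := aws.Time(time.Now())

// Dereference safely on the way back out; nil pointers become zero values.
fmt.Println(aws.StringValue(name), aws.Int64Value(count), aws.TimeValue(when))

// Slice and map variants convert in bulk.
tags := aws.StringSlice([]string{"builder", "amazon-ebs"})
fmt.Println(aws.StringValueSlice(tags))
```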
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go (134 lines, generated, vendored, new file)
@@ -0,0 +1,134 @@
|
|||
// Package stscreds are credential Providers to retrieve STS AWS credentials.
|
||||
//
|
||||
// STS provides multiple ways to retrieve credentials which can be used when making
|
||||
// future AWS service API operation calls.
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
|
||||
// ProviderName provides a name of AssumeRole provider
|
||||
const ProviderName = "AssumeRoleProvider"
|
||||
|
||||
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
|
||||
type AssumeRoler interface {
|
||||
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
||||
}
|
||||
|
||||
// DefaultDuration is the default amount of time in minutes that the credentials
|
||||
// will be valid for.
|
||||
var DefaultDuration = time.Duration(15) * time.Minute
|
||||
|
||||
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
|
||||
// keeps track of their expiration time. This provider must be used explicitly,
|
||||
// as it is not included in the credentials chain.
|
||||
type AssumeRoleProvider struct {
|
||||
credentials.Expiry
|
||||
|
||||
// STS client to make assume role request with.
|
||||
Client AssumeRoler
|
||||
|
||||
// Role to be assumed.
|
||||
RoleARN string
|
||||
|
||||
// Session name, if you wish to reuse the credentials elsewhere.
|
||||
RoleSessionName string
|
||||
|
||||
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
|
||||
Duration time.Duration
|
||||
|
||||
// Optional ExternalID to pass along, defaults to nil if not set.
|
||||
ExternalID *string
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation.
|
||||
//
|
||||
// Takes a Config provider to create the STS client. The ConfigProvider is
|
||||
// satisfied by the session.Session type.
|
||||
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: sts.New(c),
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation.
|
||||
//
|
||||
// Takes an AssumeRoler which can be satisfiede by the STS client.
|
||||
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: svc,
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve generates a new set of temporary credentials using STS.
|
||||
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
|
||||
// Apply defaults where parameters are not set.
|
||||
if p.RoleSessionName == "" {
|
||||
// Try to work out a role name that will hopefully end up unique.
|
||||
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
|
||||
}
|
||||
if p.Duration == 0 {
|
||||
// Expire as often as AWS permits.
|
||||
p.Duration = DefaultDuration
|
||||
}
|
||||
|
||||
roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
|
||||
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
|
||||
RoleArn: aws.String(p.RoleARN),
|
||||
RoleSessionName: aws.String(p.RoleSessionName),
|
||||
ExternalId: p.ExternalID,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
// We will proactively generate new credentials before they expire.
|
||||
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
|
||||
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
|
||||
SessionToken: *roleOutput.Credentials.SessionToken,
|
||||
ProviderName: ProviderName,
|
||||
}, nil
|
||||
}
|
|
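A hedged sketch of wiring the assume-role provider above into a client config. The role ARN and account number are placeholders, and it assumes the `session` and `ec2` packages from the same vendored SDK revision:

```go
sess := session.New(aws.NewConfig().WithRegion("us-east-1"))

// NewCredentials wraps the provider; sts:AssumeRole is called lazily on first
// use and the credentials are refreshed shortly before they expire.
creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-build-role",
    func(p *stscreds.AssumeRoleProvider) {
        p.Duration = 30 * time.Minute
    })

svc := ec2.New(sess, aws.NewConfig().WithCredentials(creds))
_ = svc
```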
@@ -0,0 +1,17 @@
package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

var (
    // ErrMissingRegion is an error that is returned if region configuration is
    // not found.
    //
    // @readonly
    ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

    // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
    // resolved for a service.
    //
    // @readonly
    ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
@@ -0,0 +1,98 @@
package aws

import (
    "log"
    "os"
)

// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to work around
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
    return &l
}

// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
    if l != nil {
        return *l
    }
    return LogOff
}

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
    c := l.Value()
    return c&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
    c := l.Value()
    return c >= v
}

const (
    // LogOff states that no logging should be performed by the SDK. This is the
    // default state of the SDK, and should be used to disable all logging.
    LogOff LogLevelType = iota * 0x1000

    // LogDebug states that debug output should be logged by the SDK. This should
    // be used to inspect requests made and responses received.
    LogDebug
)

// Debug Logging Sub Levels
const (
    // LogDebugWithSigning states that the SDK should log request signing and
    // presigning events. This should be used to log the signing details of
    // requests for debugging. Will also enable LogDebug.
    LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

    // LogDebugWithHTTPBody states the SDK should log HTTP request and response
    // HTTP bodies in addition to the headers and path. This should be used to
    // see the body content of requests and responses made while using the SDK.
    // Will also enable LogDebug.
    LogDebugWithHTTPBody

    // LogDebugWithRequestRetries states the SDK should log when service requests will
    // be retried. This should be used when you want to log when service
    // requests are being retried. Will also enable LogDebug.
    LogDebugWithRequestRetries

    // LogDebugWithRequestErrors states the SDK should log when service requests fail
    // to build, send, validate, or unmarshal.
    LogDebugWithRequestErrors
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
    Log(...interface{})
}

// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use the same formatting runes as the stdlib log.Logger.
func NewDefaultLogger() Logger {
    return &defaultLogger{
        logger: log.New(os.Stdout, "", log.LstdFlags),
    }
}

// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
    logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
    l.logger.Println(args...)
}
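A small sketch showing how the debug sub-levels above compose as a bitmask (assumes `aws` and `fmt` are imported):

```go
cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries)

// Matches checks a specific sub-level; AtLeast checks the overall verbosity.
fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithHTTPBody)) // true
fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithSigning))  // false
fmt.Println(cfg.LogLevel.AtLeast(aws.LogDebug))             // true
```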
@@ -0,0 +1,88 @@
package aws

import (
    "io"
    "sync"
)

// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
    return ReaderSeekerCloser{r}
}

// ReaderSeekerCloser represents a reader that can also delegate the io.Seeker and
// io.Closer interfaces to the underlying object if they are available.
type ReaderSeekerCloser struct {
    r io.Reader
}

// Read reads from the reader up to size of p. The number of bytes read, and
// error if it occurred, will be returned.
//
// If the reader is not an io.Reader, zero bytes read and a nil error will be returned.
//
// Performs the same functionality as io.Reader Read.
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
    switch t := r.r.(type) {
    case io.Reader:
        return t.Read(p)
    }
    return 0, nil
}

// Seek sets the offset for the next Read to offset, interpreted according to
// whence: 0 means relative to the origin of the file, 1 means relative to the
// current offset, and 2 means relative to the end. Seek returns the new offset
// and an error, if any.
//
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
    switch t := r.r.(type) {
    case io.Seeker:
        return t.Seek(offset, whence)
    }
    return int64(0), nil
}

// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
func (r ReaderSeekerCloser) Close() error {
    switch t := r.r.(type) {
    case io.Closer:
        return t.Close()
    }
    return nil
}

// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
// Can be used with the s3manager.Downloader to download content to a buffer
// in memory. Safe to use concurrently.
type WriteAtBuffer struct {
    buf []byte
    m   sync.Mutex
}

// WriteAt writes a slice of bytes to a buffer starting at the position provided.
// The number of bytes written will be returned, or an error. Can overwrite previously
// written slices if the writes overlap.
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
    b.m.Lock()
    defer b.m.Unlock()

    expLen := pos + int64(len(p))
    if int64(len(b.buf)) < expLen {
        newBuf := make([]byte, expLen)
        copy(newBuf, b.buf)
        b.buf = newBuf
    }
    copy(b.buf[pos:], p)
    return len(p), nil
}

// Bytes returns a slice of bytes written to the buffer.
func (b *WriteAtBuffer) Bytes() []byte {
    b.m.Lock()
    defer b.m.Unlock()
    return b.buf[:len(b.buf):len(b.buf)]
}
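A brief sketch of the two I/O helpers above (assumes `aws`, `fmt`, and `strings` are imported):

```go
// Wrap a plain reader so APIs that want a ReadSeekCloser accept it;
// Seek and Close become no-ops when the underlying type lacks them.
body := aws.ReadSeekCloser(strings.NewReader("hello"))
_ = body

// Collect writes at arbitrary offsets into memory via io.WriterAt.
buf := &aws.WriteAtBuffer{}
buf.WriteAt([]byte("world"), 5)
buf.WriteAt([]byte("hello"), 0)
fmt.Printf("%s\n", buf.Bytes()) // "helloworld"
```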
@@ -0,0 +1,8 @@
// Package aws provides core functionality for making requests to AWS services.
package aws

// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.1.2"
@@ -0,0 +1,65 @@
// Package endpoints validates regional endpoints for services.
package endpoints

//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go

import (
    "fmt"
    "regexp"
    "strings"
)

// NormalizeEndpoint takes an endpoint and service API information to return a
// normalized endpoint and signing region. If the endpoint is not an empty string
// the service name and region will be used to look up the service's API endpoint.
// If the endpoint is provided the scheme will be added if it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
    if endpoint == "" {
        return EndpointForRegion(serviceName, region, disableSSL)
    }

    return AddScheme(endpoint, disableSSL), ""
}

// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair are not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
    derivedKeys := []string{
        region + "/" + svcName,
        region + "/*",
        "*/" + svcName,
        "*/*",
    }

    for _, key := range derivedKeys {
        if val, ok := endpointsMap.Endpoints[key]; ok {
            ep := val.Endpoint
            ep = strings.Replace(ep, "{region}", region, -1)
            ep = strings.Replace(ep, "{service}", svcName, -1)

            endpoint = ep
            signingRegion = val.SigningRegion
            break
        }
    }

    return AddScheme(endpoint, disableSSL), signingRegion
}

// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")

// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
    if endpoint != "" && !schemeRE.MatchString(endpoint) {
        scheme := "https"
        if disableSSL {
            scheme = "http"
        }
        endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
    }

    return endpoint
}
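A short sketch of resolving endpoints with the functions above (assumes the vendored `endpoints` package and `fmt` are imported; the custom endpoint is a placeholder):

```go
// Resolve the default endpoint and signing region for a service/region pair.
ep, signingRegion := endpoints.EndpointForRegion("s3", "us-west-2", false)
fmt.Println(ep, signingRegion) // "https://s3-us-west-2.amazonaws.com" ""

// A custom endpoint only gets a scheme added when it is missing one.
fmt.Println(endpoints.AddScheme("localhost:9000", true)) // "http://localhost:9000"
```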
@@ -0,0 +1,92 @@
{
  "version": 2,
  "endpoints": {
    "*/*": {
      "endpoint": "{service}.{region}.amazonaws.com"
    },
    "cn-north-1/*": {
      "endpoint": "{service}.{region}.amazonaws.com.cn",
      "signatureVersion": "v4"
    },
    "us-gov-west-1/iam": {
      "endpoint": "iam.us-gov.amazonaws.com"
    },
    "us-gov-west-1/sts": {
      "endpoint": "sts.us-gov-west-1.amazonaws.com"
    },
    "us-gov-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "*/cloudfront": {
      "endpoint": "cloudfront.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/cloudsearchdomain": {
      "endpoint": "",
      "signingRegion": "us-east-1"
    },
    "*/data.iot": {
      "endpoint": "",
      "signingRegion": "us-east-1"
    },
    "*/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest",
      "signingRegion": "us-east-1"
    },
    "*/iam": {
      "endpoint": "iam.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/importexport": {
      "endpoint": "importexport.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/route53": {
      "endpoint": "route53.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/sts": {
      "endpoint": "sts.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/waf": {
      "endpoint": "waf.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "us-east-1/sdb": {
      "endpoint": "sdb.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "us-east-1/s3": {
      "endpoint": "s3.amazonaws.com"
    },
    "us-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "us-west-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "eu-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-southeast-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-southeast-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-northeast-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-northeast-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "sa-east-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "eu-central-1/s3": {
      "endpoint": "{service}.{region}.amazonaws.com",
      "signatureVersion": "v4"
    }
  }
}
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go (104 lines, generated, vendored, new file)
@@ -0,0 +1,104 @@
|
|||
package endpoints
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
type endpointStruct struct {
|
||||
Version int
|
||||
Endpoints map[string]endpointEntry
|
||||
}
|
||||
|
||||
type endpointEntry struct {
|
||||
Endpoint string
|
||||
SigningRegion string
|
||||
}
|
||||
|
||||
var endpointsMap = endpointStruct{
|
||||
Version: 2,
|
||||
Endpoints: map[string]endpointEntry{
|
||||
"*/*": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com",
|
||||
},
|
||||
"*/cloudfront": {
|
||||
Endpoint: "cloudfront.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/cloudsearchdomain": {
|
||||
Endpoint: "",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/data.iot": {
|
||||
Endpoint: "",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/ec2metadata": {
|
||||
Endpoint: "http://169.254.169.254/latest",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/iam": {
|
||||
Endpoint: "iam.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/importexport": {
|
||||
Endpoint: "importexport.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/route53": {
|
||||
Endpoint: "route53.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/sts": {
|
||||
Endpoint: "sts.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/waf": {
|
||||
Endpoint: "waf.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"ap-northeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-northeast-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"cn-north-1/*": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com.cn",
|
||||
},
|
||||
"eu-central-1/s3": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com",
|
||||
},
|
||||
"eu-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"sa-east-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-east-1/s3": {
|
||||
Endpoint: "s3.amazonaws.com",
|
||||
},
|
||||
"us-east-1/sdb": {
|
||||
Endpoint: "sdb.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"us-gov-west-1/iam": {
|
||||
Endpoint: "iam.us-gov.amazonaws.com",
|
||||
},
|
||||
"us-gov-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-gov-west-1/sts": {
|
||||
Endpoint: "sts.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
"us-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-west-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
},
|
||||
}
|
|
@@ -0,0 +1,75 @@
package protocol

import (
	"crypto/rand"
	"fmt"
	"reflect"
)

// RandReader is the random reader the protocol package will use to read
// random bytes from. This is exported for testing, and should not be used.
var RandReader = rand.Reader

const idempotencyTokenFillTag = `idempotencyToken`

// CanSetIdempotencyToken returns true if the struct field should be
// automatically populated with a Idempotency token.
//
// Only *string and string type fields that are tagged with idempotencyToken
// which are not already set can be auto filled.
func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
	switch u := v.Interface().(type) {
	// To auto fill an Idempotency token the field must be a string,
	// tagged for auto fill, and have a zero value.
	case *string:
		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
	case string:
		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
	}

	return false
}

// GetIdempotencyToken returns a randomly generated idempotency token.
func GetIdempotencyToken() string {
	b := make([]byte, 16)
	RandReader.Read(b)

	return UUIDVersion4(b)
}

// SetIdempotencyToken will set the value provided with a Idempotency Token.
// Given that the value can be set. Will panic if value is not setable.
func SetIdempotencyToken(v reflect.Value) {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() && v.CanSet() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	v = reflect.Indirect(v)

	if !v.CanSet() {
		panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
	}

	b := make([]byte, 16)
	_, err := rand.Read(b)
	if err != nil {
		// TODO handle error
		return
	}

	v.Set(reflect.ValueOf(UUIDVersion4(b)))
}

// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
func UUIDVersion4(u []byte) string {
	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
	// 13th character is "4"
	u[6] = (u[6] | 0x40) & 0x4F
	// 17th character is "8", "9", "a", or "b"
	u[8] = (u[8] | 0x80) & 0xBF

	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
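A minimal sketch of how the exported idempotency helpers above can be used, assuming only the vendored import path shown in this tree:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// GetIdempotencyToken reads 16 random bytes from RandReader and formats
	// them as a version-4 UUID via UUIDVersion4, as defined above.
	token := protocol.GetIdempotencyToken()
	fmt.Println(token) // a random UUID such as "3F2504E0-4F89-41D3-9A0C-0305E82C3301"
}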
251 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go generated vendored Normal file
@ -0,0 +1,251 @@
|
|||
// Package jsonutil provides JSON serialisation of AWS requests and responses.
|
||||
package jsonutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
var timeType = reflect.ValueOf(time.Time{}).Type()
|
||||
var byteSliceType = reflect.ValueOf([]byte{}).Type()
|
||||
|
||||
// BuildJSON builds a JSON string for a given object v.
|
||||
func BuildJSON(v interface{}) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
err := buildAny(reflect.ValueOf(v), &buf, "")
|
||||
return buf.Bytes(), err
|
||||
}
|
||||
|
||||
func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
|
||||
value = reflect.Indirect(value)
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
vtype := value.Type()
|
||||
|
||||
t := tag.Get("type")
|
||||
if t == "" {
|
||||
switch vtype.Kind() {
|
||||
case reflect.Struct:
|
||||
// also it can't be a time object
|
||||
if value.Type() != timeType {
|
||||
t = "structure"
|
||||
}
|
||||
case reflect.Slice:
|
||||
// also it can't be a byte slice
|
||||
if _, ok := value.Interface().([]byte); !ok {
|
||||
t = "list"
|
||||
}
|
||||
case reflect.Map:
|
||||
t = "map"
|
||||
}
|
||||
}
|
||||
|
||||
switch t {
|
||||
case "structure":
|
||||
if field, ok := vtype.FieldByName("_"); ok {
|
||||
tag = field.Tag
|
||||
}
|
||||
return buildStruct(value, buf, tag)
|
||||
case "list":
|
||||
return buildList(value, buf, tag)
|
||||
case "map":
|
||||
return buildMap(value, buf, tag)
|
||||
default:
|
||||
return buildScalar(value, buf, tag)
|
||||
}
|
||||
}
|
||||
|
||||
func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// unwrap payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := value.Type().FieldByName(payload)
|
||||
tag = field.Tag
|
||||
value = elemOf(value.FieldByName(payload))
|
||||
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteByte('{')
|
||||
|
||||
t := value.Type()
|
||||
first := true
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
member := value.Field(i)
|
||||
field := t.Field(i)
|
||||
|
||||
if field.PkgPath != "" {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
if field.Tag.Get("json") == "-" {
|
||||
continue
|
||||
}
|
||||
if field.Tag.Get("location") != "" {
|
||||
continue // ignore non-body elements
|
||||
}
|
||||
|
||||
if protocol.CanSetIdempotencyToken(member, field) {
|
||||
token := protocol.GetIdempotencyToken()
|
||||
member = reflect.ValueOf(&token)
|
||||
}
|
||||
|
||||
if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
|
||||
continue // ignore unset fields
|
||||
}
|
||||
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
|
||||
// figure out what this field is called
|
||||
name := field.Name
|
||||
if locName := field.Tag.Get("locationName"); locName != "" {
|
||||
name = locName
|
||||
}
|
||||
|
||||
fmt.Fprintf(buf, "%q:", name)
|
||||
|
||||
err := buildAny(member, buf, field.Tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
buf.WriteString("}")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
|
||||
buf.WriteString("[")
|
||||
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
buildAny(value.Index(i), buf, "")
|
||||
|
||||
if i < value.Len()-1 {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("]")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type sortedValues []reflect.Value
|
||||
|
||||
func (sv sortedValues) Len() int { return len(sv) }
|
||||
func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
|
||||
func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
|
||||
|
||||
func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
|
||||
buf.WriteString("{")
|
||||
|
||||
var sv sortedValues = value.MapKeys()
|
||||
sort.Sort(sv)
|
||||
|
||||
for i, k := range sv {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
|
||||
fmt.Fprintf(buf, "%q:", k)
|
||||
buildAny(value.MapIndex(k), buf, "")
|
||||
}
|
||||
|
||||
buf.WriteString("}")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
|
||||
switch value.Kind() {
|
||||
case reflect.String:
|
||||
writeString(value.String(), buf)
|
||||
case reflect.Bool:
|
||||
buf.WriteString(strconv.FormatBool(value.Bool()))
|
||||
case reflect.Int64:
|
||||
buf.WriteString(strconv.FormatInt(value.Int(), 10))
|
||||
case reflect.Float64:
|
||||
buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64))
|
||||
default:
|
||||
switch value.Type() {
|
||||
case timeType:
|
||||
converted := value.Interface().(time.Time)
|
||||
buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10))
|
||||
case byteSliceType:
|
||||
if !value.IsNil() {
|
||||
converted := value.Interface().([]byte)
|
||||
buf.WriteByte('"')
|
||||
if len(converted) < 1024 {
|
||||
// for small buffers, using Encode directly is much faster.
|
||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
|
||||
base64.StdEncoding.Encode(dst, converted)
|
||||
buf.Write(dst)
|
||||
} else {
|
||||
// for large buffers, avoid unnecessary extra temporary
|
||||
// buffer space.
|
||||
enc := base64.NewEncoder(base64.StdEncoding, buf)
|
||||
enc.Write(converted)
|
||||
enc.Close()
|
||||
}
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeString(s string, buf *bytes.Buffer) {
|
||||
buf.WriteByte('"')
|
||||
for _, r := range s {
|
||||
if r == '"' {
|
||||
buf.WriteString(`\"`)
|
||||
} else if r == '\\' {
|
||||
buf.WriteString(`\\`)
|
||||
} else if r == '\b' {
|
||||
buf.WriteString(`\b`)
|
||||
} else if r == '\f' {
|
||||
buf.WriteString(`\f`)
|
||||
} else if r == '\r' {
|
||||
buf.WriteString(`\r`)
|
||||
} else if r == '\t' {
|
||||
buf.WriteString(`\t`)
|
||||
} else if r == '\n' {
|
||||
buf.WriteString(`\n`)
|
||||
} else if r < 32 {
|
||||
fmt.Fprintf(buf, "\\u%0.4x", r)
|
||||
} else {
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
|
||||
// Returns the reflection element of a value, if it is a pointer.
|
||||
func elemOf(value reflect.Value) reflect.Value {
|
||||
for value.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
}
|
||||
return value
|
||||
}
|
213 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go generated vendored Normal file
@ -0,0 +1,213 @@
|
|||
package jsonutil
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UnmarshalJSON reads a stream and unmarshals the results in object v.
|
||||
func UnmarshalJSON(v interface{}, stream io.Reader) error {
|
||||
var out interface{}
|
||||
|
||||
b, err := ioutil.ReadAll(stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &out); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return unmarshalAny(reflect.ValueOf(v), out, "")
|
||||
}
|
||||
|
||||
func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
|
||||
vtype := value.Type()
|
||||
if vtype.Kind() == reflect.Ptr {
|
||||
vtype = vtype.Elem() // check kind of actual element type
|
||||
}
|
||||
|
||||
t := tag.Get("type")
|
||||
if t == "" {
|
||||
switch vtype.Kind() {
|
||||
case reflect.Struct:
|
||||
// also it can't be a time object
|
||||
if _, ok := value.Interface().(*time.Time); !ok {
|
||||
t = "structure"
|
||||
}
|
||||
case reflect.Slice:
|
||||
// also it can't be a byte slice
|
||||
if _, ok := value.Interface().([]byte); !ok {
|
||||
t = "list"
|
||||
}
|
||||
case reflect.Map:
|
||||
t = "map"
|
||||
}
|
||||
}
|
||||
|
||||
switch t {
|
||||
case "structure":
|
||||
if field, ok := vtype.FieldByName("_"); ok {
|
||||
tag = field.Tag
|
||||
}
|
||||
return unmarshalStruct(value, data, tag)
|
||||
case "list":
|
||||
return unmarshalList(value, data, tag)
|
||||
case "map":
|
||||
return unmarshalMap(value, data, tag)
|
||||
default:
|
||||
return unmarshalScalar(value, data, tag)
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
mapData, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("JSON value is not a structure (%#v)", data)
|
||||
}
|
||||
|
||||
t := value.Type()
|
||||
if value.Kind() == reflect.Ptr {
|
||||
if value.IsNil() { // create the structure if it's nil
|
||||
s := reflect.New(value.Type().Elem())
|
||||
value.Set(s)
|
||||
value = s
|
||||
}
|
||||
|
||||
value = value.Elem()
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
// unwrap any payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := t.FieldByName(payload)
|
||||
return unmarshalAny(value.FieldByName(payload), data, field.Tag)
|
||||
}
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.PkgPath != "" {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
|
||||
// figure out what this field is called
|
||||
name := field.Name
|
||||
if locName := field.Tag.Get("locationName"); locName != "" {
|
||||
name = locName
|
||||
}
|
||||
|
||||
member := value.FieldByIndex(field.Index)
|
||||
err := unmarshalAny(member, mapData[name], field.Tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
listData, ok := data.([]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("JSON value is not a list (%#v)", data)
|
||||
}
|
||||
|
||||
if value.IsNil() {
|
||||
l := len(listData)
|
||||
value.Set(reflect.MakeSlice(value.Type(), l, l))
|
||||
}
|
||||
|
||||
for i, c := range listData {
|
||||
err := unmarshalAny(value.Index(i), c, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
mapData, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("JSON value is not a map (%#v)", data)
|
||||
}
|
||||
|
||||
if value.IsNil() {
|
||||
value.Set(reflect.MakeMap(value.Type()))
|
||||
}
|
||||
|
||||
for k, v := range mapData {
|
||||
kvalue := reflect.ValueOf(k)
|
||||
vvalue := reflect.New(value.Type().Elem()).Elem()
|
||||
|
||||
unmarshalAny(vvalue, v, "")
|
||||
value.SetMapIndex(kvalue, vvalue)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
|
||||
errf := func() error {
|
||||
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
|
||||
}
|
||||
|
||||
switch d := data.(type) {
|
||||
case nil:
|
||||
return nil // nothing to do here
|
||||
case string:
|
||||
switch value.Interface().(type) {
|
||||
case *string:
|
||||
value.Set(reflect.ValueOf(&d))
|
||||
case []byte:
|
||||
b, err := base64.StdEncoding.DecodeString(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value.Set(reflect.ValueOf(b))
|
||||
default:
|
||||
return errf()
|
||||
}
|
||||
case float64:
|
||||
switch value.Interface().(type) {
|
||||
case *int64:
|
||||
di := int64(d)
|
||||
value.Set(reflect.ValueOf(&di))
|
||||
case *float64:
|
||||
value.Set(reflect.ValueOf(&d))
|
||||
case *time.Time:
|
||||
t := time.Unix(int64(d), 0).UTC()
|
||||
value.Set(reflect.ValueOf(&t))
|
||||
default:
|
||||
return errf()
|
||||
}
|
||||
case bool:
|
||||
switch value.Interface().(type) {
|
||||
case *bool:
|
||||
value.Set(reflect.ValueOf(&d))
|
||||
default:
|
||||
return errf()
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported JSON value (%v)", data)
|
||||
}
|
||||
return nil
|
||||
}
|
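Taken together, BuildJSON and UnmarshalJSON above give a struct-tag driven JSON round trip. A minimal illustrative sketch, using a hypothetical Queue shape (the locationName tags are what buildStruct and unmarshalStruct key on):

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// Queue is a hypothetical shape used only for this sketch.
type Queue struct {
	Name    *string `locationName:"QueueName" type:"string"`
	Retries *int64  `locationName:"MaxRetries" type:"integer"`
}

func main() {
	name, retries := "jobs", int64(3)

	out, err := jsonutil.BuildJSON(&Queue{Name: &name, Retries: &retries})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"QueueName":"jobs","MaxRetries":3}

	var q Queue
	if err := jsonutil.UnmarshalJSON(&q, bytes.NewReader(out)); err != nil {
		panic(err)
	}
	fmt.Println(*q.Name, *q.Retries) // jobs 3
}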
111 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go generated vendored Normal file
@@ -0,0 +1,111 @@
// Package jsonrpc provides JSON RPC utilities for serialisation of AWS
// requests and responses.
package jsonrpc

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go

import (
	"encoding/json"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

var emptyJSON = []byte("{}")

// BuildHandler is a named request handler for building jsonrpc protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}

// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}

// Build builds a JSON payload for a JSON RPC request.
func Build(req *request.Request) {
	var buf []byte
	var err error
	if req.ParamsFilled() {
		buf, err = jsonutil.BuildJSON(req.Params)
		if err != nil {
			req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err)
			return
		}
	} else {
		buf = emptyJSON
	}

	if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
		req.SetBufferBody(buf)
	}

	if req.ClientInfo.TargetPrefix != "" {
		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
		req.HTTPRequest.Header.Add("X-Amz-Target", target)
	}
	if req.ClientInfo.JSONVersion != "" {
		jsonVersion := req.ClientInfo.JSONVersion
		req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion)
	}
}

// Unmarshal unmarshals a response for a JSON RPC service.
func Unmarshal(req *request.Request) {
	defer req.HTTPResponse.Body.Close()
	if req.DataFilled() {
		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
		if err != nil {
			req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err)
		}
	}
	return
}

// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
func UnmarshalMeta(req *request.Request) {
	rest.UnmarshalMeta(req)
}

// UnmarshalError unmarshals an error response for a JSON RPC service.
func UnmarshalError(req *request.Request) {
	defer req.HTTPResponse.Body.Close()
	bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
	if err != nil {
		req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err)
		return
	}
	if len(bodyBytes) == 0 {
		req.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", req.HTTPResponse.Status, nil),
			req.HTTPResponse.StatusCode,
			"",
		)
		return
	}
	var jsonErr jsonErrorResponse
	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
		req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err)
		return
	}

	codes := strings.SplitN(jsonErr.Code, "#", 2)
	req.Error = awserr.NewRequestFailure(
		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
		req.HTTPResponse.StatusCode,
		req.RequestID,
	)
}

type jsonErrorResponse struct {
	Code    string `json:"__type"`
	Message string `json:"message"`
}
91 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go generated vendored Normal file
@@ -0,0 +1,91 @@
// Package restjson provides RESTful JSON serialisation of AWS
// requests and responses.
package restjson

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go

import (
	"encoding/json"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// BuildHandler is a named request handler for building restjson protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build}

// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError}

// Build builds a request for the REST JSON protocol.
func Build(r *request.Request) {
	rest.Build(r)

	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
		jsonrpc.Build(r)
	}
}

// Unmarshal unmarshals a response body for the REST JSON protocol.
func Unmarshal(r *request.Request) {
	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
		jsonrpc.Unmarshal(r)
	} else {
		rest.Unmarshal(r)
	}
}

// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
func UnmarshalMeta(r *request.Request) {
	rest.UnmarshalMeta(r)
}

// UnmarshalError unmarshals a response error for the REST JSON protocol.
func UnmarshalError(r *request.Request) {
	code := r.HTTPResponse.Header.Get("X-Amzn-Errortype")
	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err)
		return
	}
	if len(bodyBytes) == 0 {
		r.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", r.HTTPResponse.Status, nil),
			r.HTTPResponse.StatusCode,
			"",
		)
		return
	}
	var jsonErr jsonErrorResponse
	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
		r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err)
		return
	}

	if code == "" {
		code = jsonErr.Code
	}

	code = strings.SplitN(code, ":", 2)[0]
	r.Error = awserr.NewRequestFailure(
		awserr.New(code, jsonErr.Message, nil),
		r.HTTPResponse.StatusCode,
		r.RequestID,
	)
}

type jsonErrorResponse struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
@@ -0,0 +1,21 @@
package protocol

import (
	"io"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws/request"
)

// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}

// UnmarshalDiscardBody is a request handler to empty a response's body and closing it.
func UnmarshalDiscardBody(r *request.Request) {
	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
		return
	}

	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
	r.HTTPResponse.Body.Close()
}
82 vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go generated vendored Normal file
@@ -0,0 +1,82 @@
package v4

import (
	"net/http"
	"strings"
)

// validator houses a set of rule needed for validation of a
// string value
type rules []rule

// rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that rule
type rule interface {
	IsValid(value string) bool
}

// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules
func (r rules) IsValid(value string) bool {
	for _, rule := range r {
		if rule.IsValid(value) {
			return true
		}
	}
	return false
}

// mapRule generic rule for maps
type mapRule map[string]struct{}

// IsValid for the map rule satisfies whether it exists in the map
func (m mapRule) IsValid(value string) bool {
	_, ok := m[value]
	return ok
}

// whitelist is a generic rule for whitelisting
type whitelist struct {
	rule
}

// IsValid for whitelist checks if the value is within the whitelist
func (w whitelist) IsValid(value string) bool {
	return w.rule.IsValid(value)
}

// blacklist is a generic rule for blacklisting
type blacklist struct {
	rule
}

// IsValid for whitelist checks if the value is within the whitelist
func (b blacklist) IsValid(value string) bool {
	return !b.rule.IsValid(value)
}

type patterns []string

// IsValid for patterns checks each pattern and returns if a match has
// been found
func (p patterns) IsValid(value string) bool {
	for _, pattern := range p {
		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
			return true
		}
	}
	return false
}

// inclusiveRules rules allow for rules to depend on one another
type inclusiveRules []rule

// IsValid will return true if all rules are true
func (r inclusiveRules) IsValid(value string) bool {
	for _, rule := range r {
		if !rule.IsValid(value) {
			return false
		}
	}
	return true
}
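The rule types above compose: blacklist negates the rule it embeds, whitelist passes it through, and inclusiveRules requires every rule to hold. A standalone sketch of the same pattern (mirroring, not importing, the unexported types above):

package main

import "fmt"

type rule interface{ IsValid(string) bool }

// mapRule matches values that are keys of the map.
type mapRule map[string]struct{}

func (m mapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

// blacklist negates whatever rule it wraps.
type blacklist struct{ rule }

func (b blacklist) IsValid(v string) bool { return !b.rule.IsValid(v) }

func main() {
	// Mirrors ignoredHeaders in the signer below: listed headers fail the
	// rule and are therefore left out of the canonical headers.
	ignored := blacklist{mapRule{"Content-Length": {}, "User-Agent": {}}}
	fmt.Println(ignored.IsValid("User-Agent"))   // false: excluded from signing
	fmt.Println(ignored.IsValid("Content-Type")) // true: included in signing
}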
@ -0,0 +1,438 @@
|
|||
// Package v4 implements signing for AWS V4 signer
|
||||
package v4
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||
timeFormat = "20060102T150405Z"
|
||||
shortTimeFormat = "20060102"
|
||||
)
|
||||
|
||||
var ignoredHeaders = rules{
|
||||
blacklist{
|
||||
mapRule{
|
||||
"Content-Length": struct{}{},
|
||||
"User-Agent": struct{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// requiredSignedHeaders is a whitelist for build canonical headers.
|
||||
var requiredSignedHeaders = rules{
|
||||
whitelist{
|
||||
mapRule{
|
||||
"Cache-Control": struct{}{},
|
||||
"Content-Disposition": struct{}{},
|
||||
"Content-Encoding": struct{}{},
|
||||
"Content-Language": struct{}{},
|
||||
"Content-Md5": struct{}{},
|
||||
"Content-Type": struct{}{},
|
||||
"Expires": struct{}{},
|
||||
"If-Match": struct{}{},
|
||||
"If-Modified-Since": struct{}{},
|
||||
"If-None-Match": struct{}{},
|
||||
"If-Unmodified-Since": struct{}{},
|
||||
"Range": struct{}{},
|
||||
"X-Amz-Acl": struct{}{},
|
||||
"X-Amz-Copy-Source": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-Range": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Grant-Full-control": struct{}{},
|
||||
"X-Amz-Grant-Read": struct{}{},
|
||||
"X-Amz-Grant-Read-Acp": struct{}{},
|
||||
"X-Amz-Grant-Write": struct{}{},
|
||||
"X-Amz-Grant-Write-Acp": struct{}{},
|
||||
"X-Amz-Metadata-Directive": struct{}{},
|
||||
"X-Amz-Mfa": struct{}{},
|
||||
"X-Amz-Request-Payer": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Storage-Class": struct{}{},
|
||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||
},
|
||||
},
|
||||
patterns{"X-Amz-Meta-"},
|
||||
}
|
||||
|
||||
// allowedHoisting is a whitelist for build query headers. The boolean value
|
||||
// represents whether or not it is a pattern.
|
||||
var allowedQueryHoisting = inclusiveRules{
|
||||
blacklist{requiredSignedHeaders},
|
||||
patterns{"X-Amz-"},
|
||||
}
|
||||
|
||||
type signer struct {
|
||||
Request *http.Request
|
||||
Time time.Time
|
||||
ExpireTime time.Duration
|
||||
ServiceName string
|
||||
Region string
|
||||
CredValues credentials.Value
|
||||
Credentials *credentials.Credentials
|
||||
Query url.Values
|
||||
Body io.ReadSeeker
|
||||
Debug aws.LogLevelType
|
||||
Logger aws.Logger
|
||||
|
||||
isPresign bool
|
||||
formattedTime string
|
||||
formattedShortTime string
|
||||
|
||||
signedHeaders string
|
||||
canonicalHeaders string
|
||||
canonicalString string
|
||||
credentialString string
|
||||
stringToSign string
|
||||
signature string
|
||||
authorization string
|
||||
notHoist bool
|
||||
signedHeaderVals http.Header
|
||||
}
|
||||
|
||||
// Sign requests with signature version 4.
|
||||
//
|
||||
// Will sign the requests with the service config's Credentials object
|
||||
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
|
||||
// object.
|
||||
func Sign(req *request.Request) {
|
||||
// If the request does not need to be signed ignore the signing of the
|
||||
// request if the AnonymousCredentials object is used.
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
region := req.ClientInfo.SigningRegion
|
||||
if region == "" {
|
||||
region = aws.StringValue(req.Config.Region)
|
||||
}
|
||||
|
||||
name := req.ClientInfo.SigningName
|
||||
if name == "" {
|
||||
name = req.ClientInfo.ServiceName
|
||||
}
|
||||
|
||||
s := signer{
|
||||
Request: req.HTTPRequest,
|
||||
Time: req.Time,
|
||||
ExpireTime: req.ExpireTime,
|
||||
Query: req.HTTPRequest.URL.Query(),
|
||||
Body: req.Body,
|
||||
ServiceName: name,
|
||||
Region: region,
|
||||
Credentials: req.Config.Credentials,
|
||||
Debug: req.Config.LogLevel.Value(),
|
||||
Logger: req.Config.Logger,
|
||||
notHoist: req.NotHoist,
|
||||
}
|
||||
|
||||
req.Error = s.sign()
|
||||
req.SignedHeaderVals = s.signedHeaderVals
|
||||
}
|
||||
|
||||
func (v4 *signer) sign() error {
|
||||
if v4.ExpireTime != 0 {
|
||||
v4.isPresign = true
|
||||
}
|
||||
|
||||
if v4.isRequestSigned() {
|
||||
if !v4.Credentials.IsExpired() {
|
||||
// If the request is already signed, and the credentials have not
|
||||
// expired yet ignore the signing request.
|
||||
return nil
|
||||
}
|
||||
|
||||
// The credentials have expired for this request. The current signing
|
||||
// is invalid, and needs to be request because the request will fail.
|
||||
if v4.isPresign {
|
||||
v4.removePresign()
|
||||
// Update the request's query string to ensure the values stays in
|
||||
// sync in the case retrieving the new credentials fails.
|
||||
v4.Request.URL.RawQuery = v4.Query.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
v4.CredValues, err = v4.Credentials.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||
if v4.CredValues.SessionToken != "" {
|
||||
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
} else {
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
}
|
||||
} else if v4.CredValues.SessionToken != "" {
|
||||
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
}
|
||||
|
||||
v4.build()
|
||||
|
||||
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
||||
v4.logSigningInfo()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const logSignInfoMsg = `DEBUG: Request Signiture:
|
||||
---[ CANONICAL STRING ]-----------------------------
|
||||
%s
|
||||
---[ STRING TO SIGN ]--------------------------------
|
||||
%s%s
|
||||
-----------------------------------------------------`
|
||||
const logSignedURLMsg = `
|
||||
---[ SIGNED URL ]------------------------------------
|
||||
%s`
|
||||
|
||||
func (v4 *signer) logSigningInfo() {
|
||||
signedURLMsg := ""
|
||||
if v4.isPresign {
|
||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
|
||||
}
|
||||
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
|
||||
v4.Logger.Log(msg)
|
||||
}
|
||||
|
||||
func (v4 *signer) build() {
|
||||
|
||||
v4.buildTime() // no depends
|
||||
v4.buildCredentialString() // no depends
|
||||
|
||||
unsignedHeaders := v4.Request.Header
|
||||
if v4.isPresign {
|
||||
if !v4.notHoist {
|
||||
urlValues := url.Values{}
|
||||
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
|
||||
for k := range urlValues {
|
||||
v4.Query[k] = urlValues[k]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
|
||||
v4.buildCanonicalString() // depends on canon headers / signed headers
|
||||
v4.buildStringToSign() // depends on canon string
|
||||
v4.buildSignature() // depends on string to sign
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
|
||||
} else {
|
||||
parts := []string{
|
||||
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
|
||||
"SignedHeaders=" + v4.signedHeaders,
|
||||
"Signature=" + v4.signature,
|
||||
}
|
||||
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildTime() {
|
||||
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
|
||||
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
|
||||
|
||||
if v4.isPresign {
|
||||
duration := int64(v4.ExpireTime / time.Second)
|
||||
v4.Query.Set("X-Amz-Date", v4.formattedTime)
|
||||
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||
} else {
|
||||
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCredentialString() {
|
||||
v4.credentialString = strings.Join([]string{
|
||||
v4.formattedShortTime,
|
||||
v4.Region,
|
||||
v4.ServiceName,
|
||||
"aws4_request",
|
||||
}, "/")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
|
||||
}
|
||||
}
|
||||
|
||||
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
||||
query := url.Values{}
|
||||
unsignedHeaders := http.Header{}
|
||||
for k, h := range header {
|
||||
if r.IsValid(k) {
|
||||
query[k] = h
|
||||
} else {
|
||||
unsignedHeaders[k] = h
|
||||
}
|
||||
}
|
||||
|
||||
return query, unsignedHeaders
|
||||
}
|
||||
func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
|
||||
var headers []string
|
||||
headers = append(headers, "host")
|
||||
for k, v := range header {
|
||||
canonicalKey := http.CanonicalHeaderKey(k)
|
||||
if !r.IsValid(canonicalKey) {
|
||||
continue // ignored header
|
||||
}
|
||||
|
||||
lowerCaseKey := strings.ToLower(k)
|
||||
headers = append(headers, lowerCaseKey)
|
||||
|
||||
if v4.signedHeaderVals == nil {
|
||||
v4.signedHeaderVals = make(http.Header)
|
||||
}
|
||||
v4.signedHeaderVals[lowerCaseKey] = v
|
||||
}
|
||||
sort.Strings(headers)
|
||||
|
||||
v4.signedHeaders = strings.Join(headers, ";")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
|
||||
}
|
||||
|
||||
headerValues := make([]string, len(headers))
|
||||
for i, k := range headers {
|
||||
if k == "host" {
|
||||
headerValues[i] = "host:" + v4.Request.URL.Host
|
||||
} else {
|
||||
headerValues[i] = k + ":" +
|
||||
strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
|
||||
}
|
||||
}
|
||||
|
||||
v4.canonicalHeaders = strings.Join(headerValues, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCanonicalString() {
|
||||
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
|
||||
uri := v4.Request.URL.Opaque
|
||||
if uri != "" {
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
} else {
|
||||
uri = v4.Request.URL.Path
|
||||
}
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
if v4.ServiceName != "s3" {
|
||||
uri = rest.EscapePath(uri, false)
|
||||
}
|
||||
|
||||
v4.canonicalString = strings.Join([]string{
|
||||
v4.Request.Method,
|
||||
uri,
|
||||
v4.Request.URL.RawQuery,
|
||||
v4.canonicalHeaders + "\n",
|
||||
v4.signedHeaders,
|
||||
v4.bodyDigest(),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildStringToSign() {
|
||||
v4.stringToSign = strings.Join([]string{
|
||||
authHeaderPrefix,
|
||||
v4.formattedTime,
|
||||
v4.credentialString,
|
||||
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildSignature() {
|
||||
secret := v4.CredValues.SecretAccessKey
|
||||
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
|
||||
region := makeHmac(date, []byte(v4.Region))
|
||||
service := makeHmac(region, []byte(v4.ServiceName))
|
||||
credentials := makeHmac(service, []byte("aws4_request"))
|
||||
signature := makeHmac(credentials, []byte(v4.stringToSign))
|
||||
v4.signature = hex.EncodeToString(signature)
|
||||
}
|
||||
|
||||
func (v4 *signer) bodyDigest() string {
|
||||
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
if hash == "" {
|
||||
if v4.isPresign && v4.ServiceName == "s3" {
|
||||
hash = "UNSIGNED-PAYLOAD"
|
||||
} else if v4.Body == nil {
|
||||
hash = hex.EncodeToString(makeSha256([]byte{}))
|
||||
} else {
|
||||
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
|
||||
}
|
||||
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// isRequestSigned returns if the request is currently signed or presigned
|
||||
func (v4 *signer) isRequestSigned() bool {
|
||||
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
|
||||
return true
|
||||
}
|
||||
if v4.Request.Header.Get("Authorization") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// unsign removes signing flags for both signed and presigned requests.
|
||||
func (v4 *signer) removePresign() {
|
||||
v4.Query.Del("X-Amz-Algorithm")
|
||||
v4.Query.Del("X-Amz-Signature")
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
v4.Query.Del("X-Amz-Date")
|
||||
v4.Query.Del("X-Amz-Expires")
|
||||
v4.Query.Del("X-Amz-Credential")
|
||||
v4.Query.Del("X-Amz-SignedHeaders")
|
||||
}
|
||||
|
||||
func makeHmac(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256(data []byte) []byte {
|
||||
hash := sha256.New()
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||
hash := sha256.New()
|
||||
start, _ := reader.Seek(0, 1)
|
||||
defer reader.Seek(start, 0)
|
||||
|
||||
io.Copy(hash, reader)
|
||||
return hash.Sum(nil)
|
||||
}
|
|
@@ -0,0 +1,134 @@
package waiter

import (
	"fmt"
	"reflect"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
)

// A Config provides a collection of configuration values to setup a generated
// waiter code with.
type Config struct {
	Name        string
	Delay       int
	MaxAttempts int
	Operation   string
	Acceptors   []WaitAcceptor
}

// A WaitAcceptor provides the information needed to wait for an API operation
// to complete.
type WaitAcceptor struct {
	Expected interface{}
	Matcher  string
	State    string
	Argument string
}

// A Waiter provides waiting for an operation to complete.
type Waiter struct {
	Config
	Client interface{}
	Input  interface{}
}

// Wait waits for an operation to complete, expire max attempts, or fail. Error
// is returned if the operation fails.
func (w *Waiter) Wait() error {
	client := reflect.ValueOf(w.Client)
	in := reflect.ValueOf(w.Input)
	method := client.MethodByName(w.Config.Operation + "Request")

	for i := 0; i < w.MaxAttempts; i++ {
		res := method.Call([]reflect.Value{in})
		req := res[0].Interface().(*request.Request)
		req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))

		err := req.Send()
		for _, a := range w.Acceptors {
			result := false
			var vals []interface{}
			switch a.Matcher {
			case "pathAll", "path":
				// Require all matches to be equal for result to match
				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
				if len(vals) == 0 {
					break
				}
				result = true
				for _, val := range vals {
					if !awsutil.DeepEqual(val, a.Expected) {
						result = false
						break
					}
				}
			case "pathAny":
				// Only a single match needs to equal for the result to match
				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
				for _, val := range vals {
					if awsutil.DeepEqual(val, a.Expected) {
						result = true
						break
					}
				}
			case "status":
				s := a.Expected.(int)
				result = s == req.HTTPResponse.StatusCode
			case "error":
				if aerr, ok := err.(awserr.Error); ok {
					result = aerr.Code() == a.Expected.(string)
				}
			case "pathList":
				// ignored matcher
			default:
				logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
					w.Config.Operation, a.Matcher)
			}

			if !result {
				// If there was no matching result found there is nothing more to do
				// for this response, retry the request.
				continue
			}

			switch a.State {
			case "success":
				// waiter completed
				return nil
			case "failure":
				// Waiter failure state triggered
				return awserr.New("ResourceNotReady",
					fmt.Sprintf("failed waiting for successful resource state"), err)
			case "retry":
				// clear the error and retry the operation
				err = nil
			default:
				logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
					w.Config.Operation, a.State)
			}
		}
		if err != nil {
			return err
		}

		time.Sleep(time.Second * time.Duration(w.Delay))
	}

	return awserr.New("ResourceNotReady",
		fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
}

func logf(client reflect.Value, msg string, args ...interface{}) {
	cfgVal := client.FieldByName("Config")
	if !cfgVal.IsValid() {
		return
	}
	if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
		cfg.Logger.Log(fmt.Sprintf(msg, args...))
	}
}
File diff suppressed because it is too large
@@ -0,0 +1,55 @@
package ec2

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/endpoints"
)

func init() {
	initRequest = func(r *request.Request) {
		if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
			r.Handlers.Build.PushFront(fillPresignedURL)
		}
	}
}

func fillPresignedURL(r *request.Request) {
	if !r.ParamsFilled() {
		return
	}

	origParams := r.Params.(*CopySnapshotInput)

	// Stop if PresignedURL/DestinationRegion is set
	if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil {
		return
	}

	origParams.DestinationRegion = r.Config.Region
	newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput)

	// Create a new request based on the existing request. We will use this to
	// presign the CopySnapshot request against the source region.
	cfg := r.Config.Copy(aws.NewConfig().
		WithEndpoint("").
		WithRegion(aws.StringValue(origParams.SourceRegion)))

	clientInfo := r.ClientInfo
	clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion(
		clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL))

	// Presign a CopySnapshot request with modified params
	req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data)
	url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough.
	if err != nil { // bubble error back up to original request
		r.Error = err
		return
	}

	// We have our URL, set it on params
	origParams.PresignedUrl = &url
}
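For context, fillPresignedURL relies on Request.Presign, which callers can also use directly on the request returned by the generated *Request methods (see the interface listing that follows). An illustrative sketch, with placeholder region and snapshot ID:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// CopySnapshotRequest returns the underlying *request.Request, which can
	// be presigned instead of sent, just as fillPresignedURL does above.
	req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{
		SourceRegion:     aws.String("us-west-2"),
		SourceSnapshotId: aws.String("snap-1234567890abcdef0"),
	})

	url, err := req.Presign(5 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}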
832 vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go generated vendored Normal file
@ -0,0 +1,832 @@
|
|||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud.
|
||||
package ec2iface
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
// EC2API is the interface type for ec2.EC2.
|
||||
type EC2API interface {
|
||||
AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput)
|
||||
|
||||
AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error)
|
||||
|
||||
AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput)
|
||||
|
||||
AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error)
|
||||
|
||||
AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput)
|
||||
|
||||
AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error)
|
||||
|
||||
AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput)
|
||||
|
||||
AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error)
|
||||
|
||||
AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput)
|
||||
|
||||
AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error)
|
||||
|
||||
AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput)
|
||||
|
||||
AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error)
|
||||
|
||||
AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput)
|
||||
|
||||
AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error)
|
||||
|
||||
AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput)
|
||||
|
||||
AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error)
|
||||
|
||||
AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput)
|
||||
|
||||
AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error)
|
||||
|
||||
AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput)
|
||||
|
||||
AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error)
|
||||
|
||||
AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
|
||||
|
||||
AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
|
||||
|
||||
AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput)
|
||||
|
||||
AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error)
|
||||
|
||||
AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput)
|
||||
|
||||
AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error)
|
||||
|
||||
AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput)
|
||||
|
||||
AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
|
||||
|
||||
BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput)
|
||||
|
||||
BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error)
|
||||
|
||||
CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput)
|
||||
|
||||
CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error)
|
||||
|
||||
CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput)
|
||||
|
||||
CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error)
|
||||
|
||||
CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput)
|
||||
|
||||
CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error)
|
||||
|
||||
CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput)
|
||||
|
||||
CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error)
|
||||
|
||||
CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput)
|
||||
|
||||
CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error)
|
||||
|
||||
CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput)
|
||||
|
||||
CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error)
|
||||
|
||||
CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput)
|
||||
|
||||
CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error)
|
||||
|
||||
ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput)
|
||||
|
||||
ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error)
|
||||
|
||||
CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput)
|
||||
|
||||
CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error)
|
||||
|
||||
CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput)
|
||||
|
||||
CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error)
|
||||
|
||||
CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput)
|
||||
|
||||
CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error)
|
||||
|
||||
CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput)
|
||||
|
||||
CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error)
|
||||
|
||||
CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput)
|
||||
|
||||
CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error)
|
||||
|
||||
CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput)
|
||||
|
||||
CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error)
|
||||
|
||||
CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput)
|
||||
|
||||
CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error)
|
||||
|
||||
CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput)
|
||||
|
||||
CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error)
|
||||
|
||||
CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput)
|
||||
|
||||
CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error)
|
||||
|
||||
CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput)
|
||||
|
||||
CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error)
|
||||
|
||||
CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput)
|
||||
|
||||
CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error)
|
||||
|
||||
CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput)
|
||||
|
||||
CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error)
|
||||
|
||||
CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput)
|
||||
|
||||
CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error)
|
||||
|
||||
CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput)
|
||||
|
||||
CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error)
|
||||
|
||||
CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput)
|
||||
|
||||
CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error)
|
||||
|
||||
CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput)
|
||||
|
||||
CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
|
||||
|
||||
CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput)
|
||||
|
||||
CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error)
|
||||
|
||||
CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput)
|
||||
|
||||
CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
|
||||
|
||||
CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot)
|
||||
|
||||
CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error)
|
||||
|
||||
CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput)
|
||||
|
||||
CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error)
|
||||
|
||||
CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput)
|
||||
|
||||
CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error)
|
||||
|
||||
CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput)
|
||||
|
||||
CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
|
||||
|
||||
CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume)
|
||||
|
||||
CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error)
|
||||
|
||||
CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput)
|
||||
|
||||
CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error)
|
||||
|
||||
CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput)
|
||||
|
||||
CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error)
|
||||
|
||||
CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput)
|
||||
|
||||
CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error)
|
||||
|
||||
CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput)
|
||||
|
||||
CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error)
|
||||
|
||||
CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput)
|
||||
|
||||
CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error)
|
||||
|
||||
CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput)
|
||||
|
||||
CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error)
|
||||
|
||||
DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput)
|
||||
|
||||
DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error)
|
||||
|
||||
DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput)
|
||||
|
||||
DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error)
|
||||
|
||||
DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput)
|
||||
|
||||
DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error)
|
||||
|
||||
DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput)
|
||||
|
||||
DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error)
|
||||
|
||||
DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput)
|
||||
|
||||
DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error)
|
||||
|
||||
DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput)
|
||||
|
||||
DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error)
|
||||
|
||||
DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput)
|
||||
|
||||
DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error)
|
||||
|
||||
DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput)
|
||||
|
||||
DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error)
|
||||
|
||||
DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput)
|
||||
|
||||
DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error)
|
||||
|
||||
DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput)
|
||||
|
||||
DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error)
|
||||
|
||||
DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput)
|
||||
|
||||
DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
|
||||
|
||||
DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput)
|
||||
|
||||
DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error)
|
||||
|
||||
DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput)
|
||||
|
||||
DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
|
||||
|
||||
DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput)
|
||||
|
||||
DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error)
|
||||
|
||||
DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput)
|
||||
|
||||
DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error)
|
||||
|
||||
DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput)
|
||||
|
||||
DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error)
|
||||
|
||||
DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput)
|
||||
|
||||
DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error)
|
||||
|
||||
DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput)
|
||||
|
||||
DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
|
||||
|
||||
DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput)
|
||||
|
||||
DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error)
|
||||
|
||||
DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput)
|
||||
|
||||
DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error)
|
||||
|
||||
DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput)
|
||||
|
||||
DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error)
|
||||
|
||||
DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput)
|
||||
|
||||
DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error)
|
||||
|
||||
DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput)
|
||||
|
||||
DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error)
|
||||
|
||||
DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput)
|
||||
|
||||
DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error)
|
||||
|
||||
DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput)
|
||||
|
||||
DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error)
|
||||
|
||||
DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput)
|
||||
|
||||
DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error)
|
||||
|
||||
DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput)
|
||||
|
||||
DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error)
|
||||
|
||||
DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput)
|
||||
|
||||
DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error)
|
||||
|
||||
DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput)
|
||||
|
||||
DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error)
|
||||
|
||||
DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput)
|
||||
|
||||
DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error)
|
||||
|
||||
DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput)
|
||||
|
||||
DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error)
|
||||
|
||||
DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput)
|
||||
|
||||
DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error)
|
||||
|
||||
DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput)
|
||||
|
||||
DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error)
|
||||
|
||||
DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput)
|
||||
|
||||
DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error)
|
||||
|
||||
DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput)
|
||||
|
||||
DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error)
|
||||
|
||||
DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput)
|
||||
|
||||
DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error)
|
||||
|
||||
DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput)
|
||||
|
||||
DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error)
|
||||
|
||||
DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput)
|
||||
|
||||
DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error)
|
||||
|
||||
DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput)
|
||||
|
||||
DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error)
|
||||
|
||||
DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput)
|
||||
|
||||
DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error)
|
||||
|
||||
DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput)
|
||||
|
||||
DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error)
|
||||
|
||||
DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput)
|
||||
|
||||
DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error)
|
||||
|
||||
DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput)
|
||||
|
||||
DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error)
|
||||
|
||||
DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
|
||||
|
||||
DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput)
|
||||
|
||||
DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
|
||||
|
||||
DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error
|
||||
|
||||
DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput)
|
||||
|
||||
DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error)
|
||||
|
||||
DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput)
|
||||
|
||||
DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
|
||||
|
||||
DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput)
|
||||
|
||||
DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error)
|
||||
|
||||
DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput)
|
||||
|
||||
DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error)
|
||||
|
||||
DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)
|
||||
|
||||
DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error)
|
||||
|
||||
DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput)
|
||||
|
||||
DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
|
||||
|
||||
DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput)
|
||||
|
||||
DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error)
|
||||
|
||||
DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput)
|
||||
|
||||
DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error)
|
||||
|
||||
DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput)
|
||||
|
||||
DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error)
|
||||
|
||||
DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput)
|
||||
|
||||
DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error)
|
||||
|
||||
DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput)
|
||||
|
||||
DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error)
|
||||
|
||||
DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput)
|
||||
|
||||
DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error)
|
||||
|
||||
DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput)
|
||||
|
||||
DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error)
|
||||
|
||||
DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error
|
||||
|
||||
DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput)
|
||||
|
||||
DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error)
|
||||
|
||||
DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error
|
||||
|
||||
DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput)
|
||||
|
||||
DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error)
|
||||
|
||||
DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput)
|
||||
|
||||
DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error)
|
||||
|
||||
DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput)
|
||||
|
||||
DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error)
|
||||
|
||||
DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput)
|
||||
|
||||
DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error)
|
||||
|
||||
DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput)
|
||||
|
||||
DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error)
|
||||
|
||||
DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput)
|
||||
|
||||
DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error)
|
||||
|
||||
DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error
|
||||
|
||||
DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput)
|
||||
|
||||
DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error)
|
||||
|
||||
DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput)
|
||||
|
||||
DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error)
|
||||
|
||||
DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput)
|
||||
|
||||
DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error)
|
||||
|
||||
DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput)
|
||||
|
||||
DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error)
|
||||
|
||||
DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput)
|
||||
|
||||
DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error)
|
||||
|
||||
DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput)
|
||||
|
||||
DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error)
|
||||
|
||||
DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
|
||||
|
||||
DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput)
|
||||
|
||||
DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error)
|
||||
|
||||
DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput)
|
||||
|
||||
DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error)
|
||||
|
||||
DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error
|
||||
|
||||
DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput)
|
||||
|
||||
DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error)
|
||||
|
||||
DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput)
|
||||
|
||||
DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error)
|
||||
|
||||
DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error
|
||||
|
||||
DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput)
|
||||
|
||||
DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error)
|
||||
|
||||
DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error
|
||||
|
||||
DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput)
|
||||
|
||||
DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error)
|
||||
|
||||
DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput)
|
||||
|
||||
DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error)
|
||||
|
||||
DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput)
|
||||
|
||||
DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error)
|
||||
|
||||
DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput)
|
||||
|
||||
DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error)
|
||||
|
||||
DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput)
|
||||
|
||||
DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error)
|
||||
|
||||
DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput)
|
||||
|
||||
DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
|
||||
|
||||
DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput)
|
||||
|
||||
DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
|
||||
|
||||
DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput)
|
||||
|
||||
DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error)
|
||||
|
||||
DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput)
|
||||
|
||||
DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error)
|
||||
|
||||
DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput)
|
||||
|
||||
DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error)
|
||||
|
||||
DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput)
|
||||
|
||||
DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error)
|
||||
|
||||
DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput)
|
||||
|
||||
DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error)
|
||||
|
||||
DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
|
||||
|
||||
DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error)
|
||||
|
||||
DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput)
|
||||
|
||||
DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error)
|
||||
|
||||
DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput)
|
||||
|
||||
DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error)
|
||||
|
||||
DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput)
|
||||
|
||||
DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
|
||||
|
||||
DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput)
|
||||
|
||||
DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
|
||||
|
||||
DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
|
||||
|
||||
DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
|
||||
|
||||
DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
|
||||
|
||||
DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
|
||||
|
||||
EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput)
|
||||
|
||||
EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error)
|
||||
|
||||
EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput)
|
||||
|
||||
EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error)
|
||||
|
||||
EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput)
|
||||
|
||||
EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
|
||||
|
||||
EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput)
|
||||
|
||||
EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
|
||||
|
||||
GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput)
|
||||
|
||||
GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error)
|
||||
|
||||
GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput)
|
||||
|
||||
GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error)
|
||||
|
||||
ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput)
|
||||
|
||||
ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error)
|
||||
|
||||
ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput)
|
||||
|
||||
ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error)
|
||||
|
||||
ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput)
|
||||
|
||||
ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
|
||||
|
||||
ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput)
|
||||
|
||||
ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error)
|
||||
|
||||
ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput)
|
||||
|
||||
ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error)
|
||||
|
||||
ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput)
|
||||
|
||||
ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error)
|
||||
|
||||
ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput)
|
||||
|
||||
ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error)
|
||||
|
||||
ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput)
|
||||
|
||||
ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error)
|
||||
|
||||
ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput)
|
||||
|
||||
ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
|
||||
|
||||
ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput)
|
||||
|
||||
ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error)
|
||||
|
||||
ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput)
|
||||
|
||||
ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error)
|
||||
|
||||
ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput)
|
||||
|
||||
ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error)
|
||||
|
||||
ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput)
|
||||
|
||||
ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error)
|
||||
|
||||
ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput)
|
||||
|
||||
ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error)
|
||||
|
||||
ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput)
|
||||
|
||||
ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error)
|
||||
|
||||
ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput)
|
||||
|
||||
ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error)
|
||||
|
||||
ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput)
|
||||
|
||||
ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error)
|
||||
|
||||
ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput)
|
||||
|
||||
ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error)
|
||||
|
||||
MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput)
|
||||
|
||||
MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error)
|
||||
|
||||
MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput)
|
||||
|
||||
MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error)
|
||||
|
||||
PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput)
|
||||
|
||||
PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error)
|
||||
|
||||
PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput)
|
||||
|
||||
PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error)
|
||||
|
||||
RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput)
|
||||
|
||||
RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error)
|
||||
|
||||
RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput)
|
||||
|
||||
RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error)
|
||||
|
||||
RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput)
|
||||
|
||||
RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error)
|
||||
|
||||
ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput)
|
||||
|
||||
ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error)
|
||||
|
||||
ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput)
|
||||
|
||||
ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error)
|
||||
|
||||
ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput)
|
||||
|
||||
ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error)
|
||||
|
||||
ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput)
|
||||
|
||||
ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error)
|
||||
|
||||
ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput)
|
||||
|
||||
ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error)
|
||||
|
||||
ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput)
|
||||
|
||||
ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error)
|
||||
|
||||
ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput)
|
||||
|
||||
ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error)
|
||||
|
||||
RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput)
|
||||
|
||||
RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error)
|
||||
|
||||
RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput)
|
||||
|
||||
RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error)
|
||||
|
||||
ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput)
|
||||
|
||||
ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error)
|
||||
|
||||
ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput)
|
||||
|
||||
ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error)
|
||||
|
||||
ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput)
|
||||
|
||||
ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error)
|
||||
|
||||
ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput)
|
||||
|
||||
ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error)
|
||||
|
||||
RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput)
|
||||
|
||||
RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error)
|
||||
|
||||
RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput)
|
||||
|
||||
RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error)
|
||||
|
||||
RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput)
|
||||
|
||||
RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)
|
||||
|
||||
RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation)
|
||||
|
||||
RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error)
|
||||
|
||||
RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput)
|
||||
|
||||
RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error)
|
||||
|
||||
StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput)
|
||||
|
||||
StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error)
|
||||
|
||||
StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput)
|
||||
|
||||
StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error)
|
||||
|
||||
TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput)
|
||||
|
||||
TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
|
||||
|
||||
UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput)
|
||||
|
||||
UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error)
|
||||
|
||||
UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput)
|
||||
|
||||
UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error)
}

var _ EC2API = (*ec2.EC2)(nil)
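
The blank-identifier assignment above is a compile-time assertion that *ec2.EC2 satisfies the EC2API interface. For consumers of this vendored package, the interface is what makes the client swappable in tests. A minimal sketch, assuming the conventional ec2iface import path; the stub type, its field, and the canned output are hypothetical and not part of this diff:

package mypkg

import (
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// stubEC2 satisfies EC2API by embedding the interface and overriding
// only the operation the code under test actually exercises.
type stubEC2 struct {
	ec2iface.EC2API
	images *ec2.DescribeImagesOutput
}

func (s *stubEC2) DescribeImages(in *ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) {
	return s.images, nil // canned response instead of a network call
}
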
@@ -0,0 +1,89 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package ec2

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/ec2query"
	"github.com/aws/aws-sdk-go/private/signer/v4"
)

// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your
// need to invest in hardware up front, so you can develop and deploy applications
// faster.
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type EC2 struct {
	*client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "ec2"

// New creates a new instance of the EC2 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create a EC2 client from just a session.
//     svc := ec2.New(mySession)
//
//     // Create a EC2 client with additional configuration
//     svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
	c := p.ClientConfig(ServiceName, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 {
	svc := &EC2{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   ServiceName,
				SigningRegion: signingRegion,
				Endpoint:      endpoint,
				APIVersion:    "2015-10-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBack(v4.Sign)
	svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}

// newRequest creates a new request for a EC2 operation and runs any
// custom request initialization.
func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
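
For context, a minimal sketch of how a consumer would construct the client that the generated New above returns and issue one of the calls from the EC2API interface. The session setup, region, and printed field are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Build a session and an EC2 client; the region is a placeholder.
	sess := session.New(aws.NewConfig().WithRegion("us-west-2"))
	svc := ec2.New(sess)

	// Every generated operation follows the same input/output shape.
	out, err := svc.DescribeRegions(&ec2.DescribeRegionsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("regions:", len(out.Regions))
}
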
@@ -0,0 +1,761 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package ec2

import (
	"github.com/aws/aws-sdk-go/private/waiter"
)

func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeBundleTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "BundleTasks[].State",
				Expected: "complete",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "BundleTasks[].State",
				Expected: "failed",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeConversionTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "ConversionTasks[].State",
				Expected: "cancelled",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeConversionTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "ConversionTasks[].State",
				Expected: "completed",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "ConversionTasks[].State",
				Expected: "cancelled",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "ConversionTasks[].State",
				Expected: "cancelling",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeConversionTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "ConversionTasks[].State",
				Expected: "deleted",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeCustomerGateways",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "CustomerGateways[].State",
				Expected: "available",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "CustomerGateways[].State",
				Expected: "deleted",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "CustomerGateways[].State",
				Expected: "deleting",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeExportTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "ExportTasks[].State",
				Expected: "cancelled",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeExportTasks",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "ExportTasks[].State",
				Expected: "completed",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeImages",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Images[].State",
				Expected: "available",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Images[].State",
				Expected: "failed",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstances",
		Delay:       5,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "status",
				Argument: "",
				Expected: 200,
			},
			{
				State:    "retry",
				Matcher:  "error",
				Argument: "",
				Expected: "InvalidInstanceIDNotFound",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstances",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "running",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "shutting-down",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "terminated",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "stopping",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}
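
As a usage note (not part of the generated file): callers typically invoke one of these waiters right after the mutating call whose effect they need to observe. A hedged sketch, assuming svc is an *EC2 client as returned by New, the aws/ec2/log imports from the earlier example, and a placeholder instance ID:

	// Block until the launched instance reports the running state, or fail
	// once a terminal state (e.g. "terminated") or the attempt limit is hit.
	err := svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String("i-0123456789abcdef0")},
	})
	if err != nil {
		log.Fatalf("instance never reached running: %v", err)
	}
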

func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstanceStatus",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "InstanceStatuses[].InstanceStatus.Status",
				Expected: "ok",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstances",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "stopped",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "pending",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "terminated",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstances",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "terminated",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "pending",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Reservations[].Instances[].State.Name",
				Expected: "stopping",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeKeyPairs",
		Delay:       5,
		MaxAttempts: 6,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "length(KeyPairs[].KeyName) > `0`",
				Expected: true,
			},
			{
				State:    "retry",
				Matcher:  "error",
				Argument: "",
				Expected: "InvalidKeyPairNotFound",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeNetworkInterfaces",
		Delay:       20,
		MaxAttempts: 10,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "NetworkInterfaces[].Status",
				Expected: "available",
			},
			{
				State:    "failure",
				Matcher:  "error",
				Argument: "",
				Expected: "InvalidNetworkInterfaceIDNotFound",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error {
	waiterCfg := waiter.Config{
		Operation:   "GetPasswordData",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "path",
				Argument: "length(PasswordData) > `0`",
				Expected: true,
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeSnapshots",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Snapshots[].State",
				Expected: "completed",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeSpotInstanceRequests",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "SpotInstanceRequests[].Status.Code",
				Expected: "fulfilled",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "SpotInstanceRequests[].Status.Code",
				Expected: "schedule-expired",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "SpotInstanceRequests[].Status.Code",
				Expected: "canceled-before-fulfillment",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "SpotInstanceRequests[].Status.Code",
				Expected: "bad-parameters",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "SpotInstanceRequests[].Status.Code",
				Expected: "system-error",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeSubnets",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Subnets[].State",
				Expected: "available",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeInstanceStatus",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "InstanceStatuses[].SystemStatus.Status",
				Expected: "ok",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeVolumes",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Volumes[].State",
				Expected: "available",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Volumes[].State",
				Expected: "deleted",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeVolumes",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Volumes[].State",
				Expected: "deleted",
			},
			{
				State:    "success",
				Matcher:  "error",
				Argument: "",
				Expected: "InvalidVolumeNotFound",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeVolumes",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Volumes[].State",
				Expected: "in-use",
			},
			{
				State:    "failure",
				Matcher:  "pathAny",
				Argument: "Volumes[].State",
				Expected: "deleted",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error {
	waiterCfg := waiter.Config{
		Operation:   "DescribeVpcs",
		Delay:       15,
		MaxAttempts: 40,
		Acceptors: []waiter.WaitAcceptor{
			{
				State:    "success",
				Matcher:  "pathAll",
				Argument: "Vpcs[].State",
				Expected: "available",
			},
		},
	}

	w := waiter.Waiter{
		Client: c,
		Input:  input,
		Config: waiterCfg,
	}
	return w.Wait()
}

func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error {
	waiterCfg := waiter.Config{
Operation: "DescribeVpnConnections",
|
||||
Delay: 15,
|
||||
MaxAttempts: 40,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "pathAll",
|
||||
Argument: "VpnConnections[].State",
|
||||
Expected: "available",
|
||||
},
|
||||
{
|
||||
State: "failure",
|
||||
Matcher: "pathAny",
|
||||
Argument: "VpnConnections[].State",
|
||||
Expected: "deleting",
|
||||
},
|
||||
{
|
||||
State: "failure",
|
||||
Matcher: "pathAny",
|
||||
Argument: "VpnConnections[].State",
|
||||
Expected: "deleted",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
||||
|
||||
func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error {
|
||||
waiterCfg := waiter.Config{
|
||||
Operation: "DescribeVpnConnections",
|
||||
Delay: 15,
|
||||
MaxAttempts: 40,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "pathAll",
|
||||
Argument: "VpnConnections[].State",
|
||||
Expected: "deleted",
|
||||
},
|
||||
{
|
||||
State: "failure",
|
||||
Matcher: "pathAny",
|
||||
Argument: "VpnConnections[].State",
|
||||
Expected: "pending",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
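The waiters above only describe polling behaviour (operation name, delay, max attempts, and JMESPath acceptors); callers drive them through the generated WaitUntil* methods. A minimal sketch of how a consumer might poll for an EBS volume with this vendored SDK; the session setup, region, and volume ID are illustrative assumptions, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumed setup: credentials come from the default chain, region is illustrative.
	svc := ec2.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// Polls DescribeVolumes every 15s, up to 40 attempts, until Volumes[].State
	// is "available", per the WaitUntilVolumeAvailable config above.
	input := &ec2.DescribeVolumesInput{
		VolumeIds: []*string{aws.String("vol-12345678")}, // hypothetical volume ID
	}
	if err := svc.WaitUntilVolumeAvailable(input); err != nil {
		log.Fatalf("volume never became available: %v", err)
	}
	fmt.Println("volume is available")
}
```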
File diff suppressed because it is too large
@@ -0,0 +1,43 @@
package s3

import (
	"io/ioutil"
	"regexp"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
)

var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)

func buildGetBucketLocation(r *request.Request) {
	if r.DataFilled() {
		out := r.Data.(*GetBucketLocationOutput)
		b, err := ioutil.ReadAll(r.HTTPResponse.Body)
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed reading response body", err)
			return
		}

		match := reBucketLocation.FindSubmatch(b)
		if len(match) > 1 {
			loc := string(match[1])
			out.LocationConstraint = &loc
		}
	}
}

func populateLocationConstraint(r *request.Request) {
	if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
		in := r.Params.(*CreateBucketInput)
		if in.CreateBucketConfiguration == nil {
			r.Params = awsutil.CopyOf(r.Params)
			in = r.Params.(*CreateBucketInput)
			in.CreateBucketConfiguration = &CreateBucketConfiguration{
				LocationConstraint: r.Config.Region,
			}
		}
	}
}

@@ -0,0 +1,36 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"
	"io"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
	h := md5.New()

	// hash the body. seek back to the first position after reading to reset
	// the body for transmission. copy errors may be assumed to be from the
	// body.
	_, err := io.Copy(h, r.Body)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to read body", err)
		return
	}
	_, err = r.Body.Seek(0, 0)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to seek body", err)
		return
	}

	// encode the md5 checksum in base64 and set the request header.
	sum := h.Sum(nil)
	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
	base64.StdEncoding.Encode(sum64, sum)
	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
}

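The handler above is wired into specific S3 operations by the customizations file that follows; the header value is simply the base64-encoded MD5 of the request body. A standalone sketch of the same computation, using an illustrative payload:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Illustrative payload; for a real request this would be the serialized body.
	body := []byte(`<Delete><Object><Key>example.txt</Key></Object></Delete>`)

	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	// This is the value the handler would place in the Content-MD5 header.
	fmt.Println("Content-MD5:", contentMD5)
}
```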
@@ -0,0 +1,37 @@
package s3

import (
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

func init() {
	initClient = func(c *client.Client) {
		// Support building custom host-style bucket endpoints
		c.Handlers.Build.PushFront(updateHostWithBucket)

		// Require SSL when using SSE keys
		c.Handlers.Validate.PushBack(validateSSERequiresSSL)
		c.Handlers.Build.PushBack(computeSSEKeys)

		// S3 uses custom error unmarshaling logic
		c.Handlers.UnmarshalError.Clear()
		c.Handlers.UnmarshalError.PushBack(unmarshalError)
	}

	initRequest = func(r *request.Request) {
		switch r.Operation.Name {
		case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
			// These S3 operations require Content-MD5 to be set
			r.Handlers.Build.PushBack(contentMD5)
		case opGetBucketLocation:
			// GetBucketLocation has custom parsing logic
			r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
		case opCreateBucket:
			// Auto-populate LocationConstraint with current region
			r.Handlers.Validate.PushFront(populateLocationConstraint)
		case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
			r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
		}
	}
}

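These init hooks show the handler-list mechanism the SDK uses for per-service behaviour: named phases (Validate, Build, Unmarshal, UnmarshalError) each hold an ordered list of functions that receive the *request.Request. The same mechanism is available to callers; a hedged sketch of attaching a custom Build handler to an S3 client, where the client construction details are assumptions rather than part of this diff:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New()) // assumed default credentials and region

	// Runs after each request is built, alongside handlers such as
	// updateHostWithBucket and computeSSEKeys registered above.
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		log.Printf("building %s %s", r.Operation.Name, r.HTTPRequest.URL)
	})
}
```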
@ -0,0 +1,60 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
|
||||
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
|
||||
|
||||
// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
|
||||
// Buckets created outside of the classic region MUST be DNS compatible.
|
||||
func dnsCompatibleBucketName(bucket string) bool {
|
||||
return reDomain.MatchString(bucket) &&
|
||||
!reIPAddress.MatchString(bucket) &&
|
||||
!strings.Contains(bucket, "..")
|
||||
}
|
||||
|
||||
// hostStyleBucketName returns true if the request should put the bucket in
|
||||
// the host. This is false if S3ForcePathStyle is explicitly set or if the
|
||||
// bucket is not DNS compatible.
|
||||
func hostStyleBucketName(r *request.Request, bucket string) bool {
|
||||
if aws.BoolValue(r.Config.S3ForcePathStyle) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Bucket might be DNS compatible but dots in the hostname will fail
|
||||
// certificate validation, so do not use host-style.
|
||||
if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBucketLocation should be able to be called from any region within
|
||||
// a partition, and return the associated region of the bucket.
|
||||
if r.Operation.Name == opGetBucketLocation {
|
||||
return false
|
||||
}
|
||||
|
||||
// Use host-style if the bucket is DNS compatible
|
||||
return dnsCompatibleBucketName(bucket)
|
||||
}
|
||||
|
||||
func updateHostWithBucket(r *request.Request) {
|
||||
b, _ := awsutil.ValuesAtPath(r.Params, "Bucket")
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) {
|
||||
r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host
|
||||
r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1)
|
||||
if r.HTTPRequest.URL.Path == "" {
|
||||
r.HTTPRequest.URL.Path = "/"
|
||||
}
|
||||
}
|
||||
}
|
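The host-style logic above boils down to two regular expressions plus a few exclusions (path-style forced, dotted buckets over HTTPS, GetBucketLocation). A small sketch that applies the same DNS-compatibility rules to a few sample bucket names:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same patterns as the vendored helper above.
var (
	reDomain    = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
	reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)

func dnsCompatible(bucket string) bool {
	return reDomain.MatchString(bucket) &&
		!reIPAddress.MatchString(bucket) &&
		!strings.Contains(bucket, "..")
}

func main() {
	for _, b := range []string{"my-bucket", "My_Bucket", "192.168.0.1", "a..b"} {
		// Only "my-bucket" qualifies for host-style addressing here.
		fmt.Printf("%-12s dns-compatible: %v\n", b, dnsCompatible(b))
	}
}
```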
23 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Package s3manageriface provides an interface for the s3manager package
package s3manageriface

import (
	"io"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// DownloaderAPI is the interface type for s3manager.Downloader.
type DownloaderAPI interface {
	Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
}

var _ DownloaderAPI = (*s3manager.Downloader)(nil)

// UploaderAPI is the interface type for s3manager.Uploader.
type UploaderAPI interface {
	Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
}

var _ UploaderAPI = (*s3manager.Uploader)(nil)

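Interfaces like these exist so code that consumes s3manager can be tested without talking to S3. A hedged sketch of a stub that satisfies DownloaderAPI; the stub name and behaviour are illustrative, not part of this diff:

```go
package main

import (
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
)

// fakeDownloader is a hypothetical test double that writes canned bytes
// instead of fetching an object from S3.
type fakeDownloader struct{}

func (fakeDownloader) Download(w io.WriterAt, in *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (int64, error) {
	payload := []byte("canned object body")
	_, err := w.WriteAt(payload, 0)
	return int64(len(payload)), err
}

// Compile-time check that the stub satisfies the vendored interface.
var _ s3manageriface.DownloaderAPI = fakeDownloader{}

func main() {
	fmt.Println("fakeDownloader satisfies s3manageriface.DownloaderAPI")
}
```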
@ -0,0 +1,86 @@
|
|||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
||||
"github.com/aws/aws-sdk-go/private/signer/v4"
|
||||
)
|
||||
|
||||
// S3 is a client for Amazon S3.
|
||||
//The service client's operations are safe to be used concurrently.
|
||||
// It is not safe to mutate any of the client's properties though.
|
||||
type S3 struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// Used for custom client initialization logic
|
||||
var initClient func(*client.Client)
|
||||
|
||||
// Used for custom request initialization logic
|
||||
var initRequest func(*request.Request)
|
||||
|
||||
// A ServiceName is the name of the service the client will make API calls to.
|
||||
const ServiceName = "s3"
|
||||
|
||||
// New creates a new instance of the S3 client with a session.
|
||||
// If additional configuration is needed for the client instance use the optional
|
||||
// aws.Config parameter to add your extra config.
|
||||
//
|
||||
// Example:
|
||||
// // Create a S3 client from just a session.
|
||||
// svc := s3.New(mySession)
|
||||
//
|
||||
// // Create a S3 client with additional configuration
|
||||
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
|
||||
c := p.ClientConfig(ServiceName, cfgs...)
|
||||
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
|
||||
}
|
||||
|
||||
// newClient creates, initializes and returns a new service client instance.
|
||||
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 {
|
||||
svc := &S3{
|
||||
Client: client.New(
|
||||
cfg,
|
||||
metadata.ClientInfo{
|
||||
ServiceName: ServiceName,
|
||||
SigningRegion: signingRegion,
|
||||
Endpoint: endpoint,
|
||||
APIVersion: "2006-03-01",
|
||||
},
|
||||
handlers,
|
||||
),
|
||||
}
|
||||
|
||||
// Handlers
|
||||
svc.Handlers.Sign.PushBack(v4.Sign)
|
||||
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
|
||||
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
|
||||
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
|
||||
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
|
||||
|
||||
// Run custom client initialization if present
|
||||
if initClient != nil {
|
||||
initClient(svc.Client)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
// newRequest creates a new request for a S3 operation and runs any
|
||||
// custom request initialization.
|
||||
func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
|
||||
req := c.NewRequest(op, params, data)
|
||||
|
||||
// Run custom request initialization if present
|
||||
if initRequest != nil {
|
||||
initRequest(req)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
|
@@ -0,0 +1,44 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
)

var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)

func validateSSERequiresSSL(r *request.Request) {
	if r.HTTPRequest.URL.Scheme != "https" {
		p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
		if len(p) > 0 {
			r.Error = errSSERequiresSSL
		}
	}
}

func computeSSEKeys(r *request.Request) {
	headers := []string{
		"x-amz-server-side-encryption-customer-key",
		"x-amz-copy-source-server-side-encryption-customer-key",
	}

	for _, h := range headers {
		md5h := h + "-md5"
		if key := r.HTTPRequest.Header.Get(h); key != "" {
			// Base64-encode the value
			b64v := base64.StdEncoding.EncodeToString([]byte(key))
			r.HTTPRequest.Header.Set(h, b64v)

			// Add MD5 if it wasn't computed
			if r.HTTPRequest.Header.Get(md5h) == "" {
				sum := md5.Sum([]byte(key))
				b64sum := base64.StdEncoding.EncodeToString(sum[:])
				r.HTTPRequest.Header.Set(md5h, b64sum)
			}
		}
	}
}

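computeSSEKeys shows what the SDK does with an SSE-C key before it goes on the wire: the raw key is base64-encoded, and if the matching -md5 header is absent it is filled with the base64 MD5 of the raw key. The same transformation in isolation, using an obviously fake 32-byte key:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Illustrative 32-byte customer key; never hard-code a real key.
	key := []byte("0123456789abcdef0123456789abcdef")

	b64Key := base64.StdEncoding.EncodeToString(key)
	sum := md5.Sum(key) // MD5 of the raw key, as in the handler above
	b64MD5 := base64.StdEncoding.EncodeToString(sum[:])

	fmt.Println("x-amz-server-side-encryption-customer-key:", b64Key)
	fmt.Println("x-amz-server-side-encryption-customer-key-md5:", b64MD5)
}
```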
@@ -0,0 +1,36 @@
package s3

import (
	"bytes"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.New("SerializationError", "unable to read response body", err)
		return
	}
	body := bytes.NewReader(b)
	r.HTTPResponse.Body = aws.ReadSeekCloser(body)
	defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0)

	if body.Len() == 0 {
		// If there is no body don't attempt to parse the body.
		return
	}

	unmarshalError(r)
	if err, ok := r.Error.(awserr.Error); ok && err != nil {
		if err.Code() == "SerializationError" {
			r.Error = nil
			return
		}
		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
	}
}

@ -0,0 +1,57 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
type xmlErrorResponse struct {
|
||||
XMLName xml.Name `xml:"Error"`
|
||||
Code string `xml:"Code"`
|
||||
Message string `xml:"Message"`
|
||||
}
|
||||
|
||||
func unmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("BucketRegionError",
|
||||
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
|
||||
aws.StringValue(r.Config.Region)),
|
||||
nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
if r.HTTPResponse.ContentLength == 0 {
|
||||
// No body, use status code to generate an awserr.Error
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
resp := &xmlErrorResponse{}
|
||||
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
|
||||
if err != nil && err != io.EOF {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil)
|
||||
} else {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(resp.Code, resp.Message, nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
}
|
||||
}
|
|
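Every branch of the error unmarshaler above surfaces the failure as an awserr.RequestFailure, so callers can branch on the S3 error code rather than parsing the XML themselves. A hedged sketch of inspecting such an error; the HeadBucket call and bucket name are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New()) // assumed default credentials and region

	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("some-missing-bucket")})
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		// Code and StatusCode come from the unmarshaling logic above.
		fmt.Printf("code=%s status=%d request-id=%s\n",
			reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```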
@ -0,0 +1,117 @@
|
|||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/private/waiter"
|
||||
)
|
||||
|
||||
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
|
||||
waiterCfg := waiter.Config{
|
||||
Operation: "HeadBucket",
|
||||
Delay: 5,
|
||||
MaxAttempts: 20,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 200,
|
||||
},
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 403,
|
||||
},
|
||||
{
|
||||
State: "retry",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
||||
|
||||
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
|
||||
waiterCfg := waiter.Config{
|
||||
Operation: "HeadBucket",
|
||||
Delay: 5,
|
||||
MaxAttempts: 20,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
||||
|
||||
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
|
||||
waiterCfg := waiter.Config{
|
||||
Operation: "HeadObject",
|
||||
Delay: 5,
|
||||
MaxAttempts: 20,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 200,
|
||||
},
|
||||
{
|
||||
State: "retry",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
||||
|
||||
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
|
||||
waiterCfg := waiter.Config{
|
||||
Operation: "HeadObject",
|
||||
Delay: 5,
|
||||
MaxAttempts: 20,
|
||||
Acceptors: []waiter.WaitAcceptor{
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w := waiter.Waiter{
|
||||
Client: c,
|
||||
Input: input,
|
||||
Config: waiterCfg,
|
||||
}
|
||||
return w.Wait()
|
||||
}
|
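As with the EC2 waiters, these are driven through the generated methods; a common pattern is to create a bucket and then block until HeadBucket reports it. A minimal sketch, where the bucket name and client setup are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New()) // assumed default credentials and region
	bucket := aws.String("example-bucket-name")

	if _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}

	// Polls HeadBucket every 5s, up to 20 attempts, treating 200/403 as
	// success and 404 as retry, per the waiter config above.
	if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}
}
```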
File diff suppressed because it is too large
@@ -0,0 +1,12 @@
package sts

import "github.com/aws/aws-sdk-go/aws/request"

func init() {
	initRequest = func(r *request.Request) {
		switch r.Operation.Name {
		case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
			r.Handlers.Sign.Clear() // these operations are unsigned
		}
	}
}

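The hook above strips the signing handlers because AssumeRoleWithSAML and AssumeRoleWithWebIdentity are authenticated by the token in the request body rather than by SigV4. A hedged sketch of calling the web-identity variant; the role ARN, session name, and token are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.New()) // no AWS credentials are needed for this call

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("header.payload.signature"), // placeholder OIDC token
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```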
@ -0,0 +1,130 @@
|
|||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package sts
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/query"
|
||||
"github.com/aws/aws-sdk-go/private/signer/v4"
|
||||
)
|
||||
|
||||
// The AWS Security Token Service (STS) is a web service that enables you to
|
||||
// request temporary, limited-privilege credentials for AWS Identity and Access
|
||||
// Management (IAM) users or for users that you authenticate (federated users).
|
||||
// This guide provides descriptions of the STS API. For more detailed information
|
||||
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
|
||||
//
|
||||
// As an alternative to using the API, you can use one of the AWS SDKs, which
|
||||
// consist of libraries and sample code for various programming languages and
|
||||
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
|
||||
// way to create programmatic access to STS. For example, the SDKs take care
|
||||
// of cryptographically signing requests, managing errors, and retrying requests
|
||||
// automatically. For information about the AWS SDKs, including how to download
|
||||
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
|
||||
// For information about setting up signatures and authorization through the
|
||||
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html"
|
||||
// target="_blank) in the AWS General Reference. For general information about
|
||||
// the Query API, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html"
|
||||
// target="_blank) in Using IAM. For information about using security tokens
|
||||
// with other AWS products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
|
||||
// in the Using IAM.
|
||||
//
|
||||
// If you're new to AWS and need additional technical information about a specific
|
||||
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
|
||||
// (http://aws.amazon.com/documentation/" target="_blank).
|
||||
//
|
||||
// Endpoints
|
||||
//
|
||||
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
|
||||
// that maps to the US East (N. Virginia) region. Additional regions are available,
|
||||
// but must first be activated in the AWS Management Console before you can
|
||||
// use a different region's endpoint. For more information about activating
|
||||
// a region for STS see Activating STS in a New Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the Using IAM.
|
||||
//
|
||||
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
|
||||
// in the AWS General Reference.
|
||||
//
|
||||
// Recording API requests
|
||||
//
|
||||
// STS supports AWS CloudTrail, which is a service that records AWS calls for
|
||||
// your AWS account and delivers log files to an Amazon S3 bucket. By using
|
||||
// information collected by CloudTrail, you can determine what requests were
|
||||
// successfully made to STS, who made the request, when it was made, and so
|
||||
// on. To learn more about CloudTrail, including how to turn it on and find
|
||||
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
|
||||
//The service client's operations are safe to be used concurrently.
|
||||
// It is not safe to mutate any of the client's properties though.
|
||||
type STS struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// Used for custom client initialization logic
|
||||
var initClient func(*client.Client)
|
||||
|
||||
// Used for custom request initialization logic
|
||||
var initRequest func(*request.Request)
|
||||
|
||||
// A ServiceName is the name of the service the client will make API calls to.
|
||||
const ServiceName = "sts"
|
||||
|
||||
// New creates a new instance of the STS client with a session.
|
||||
// If additional configuration is needed for the client instance use the optional
|
||||
// aws.Config parameter to add your extra config.
|
||||
//
|
||||
// Example:
|
||||
// // Create a STS client from just a session.
|
||||
// svc := sts.New(mySession)
|
||||
//
|
||||
// // Create a STS client with additional configuration
|
||||
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
|
||||
c := p.ClientConfig(ServiceName, cfgs...)
|
||||
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
|
||||
}
|
||||
|
||||
// newClient creates, initializes and returns a new service client instance.
|
||||
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
|
||||
svc := &STS{
|
||||
Client: client.New(
|
||||
cfg,
|
||||
metadata.ClientInfo{
|
||||
ServiceName: ServiceName,
|
||||
SigningRegion: signingRegion,
|
||||
Endpoint: endpoint,
|
||||
APIVersion: "2011-06-15",
|
||||
},
|
||||
handlers,
|
||||
),
|
||||
}
|
||||
|
||||
// Handlers
|
||||
svc.Handlers.Sign.PushBack(v4.Sign)
|
||||
svc.Handlers.Build.PushBackNamed(query.BuildHandler)
|
||||
svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
|
||||
svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
|
||||
svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
|
||||
|
||||
// Run custom client initialization if present
|
||||
if initClient != nil {
|
||||
initClient(svc.Client)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
// newRequest creates a new request for a STS operation and runs any
|
||||
// custom request initialization.
|
||||
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
|
||||
req := c.NewRequest(op, params, data)
|
||||
|
||||
// Run custom request initialization if present
|
||||
if initRequest != nil {
|
||||
initRequest(req)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
38 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go generated vendored Normal file
@@ -0,0 +1,38 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

// Package stsiface provides an interface for the AWS Security Token Service.
package stsiface

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/sts"
)

// STSAPI is the interface type for sts.STS.
type STSAPI interface {
	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)

	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)

	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)

	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)

	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)

	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)

	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)

	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)

	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)

	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)

	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)

	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
}

var _ STSAPI = (*sts.STS)(nil)

@@ -0,0 +1,2 @@
example/example
example/example.exe

@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [2013] [the CloudFoundry Authors]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,30 @@
# Speakeasy

This package provides cross-platform Go (#golang) helpers for taking user input
from the terminal while not echoing the input back (similar to `getpasswd`). The
package uses syscalls to avoid any dependence on cgo, and is therefore
compatible with cross-compiling.

[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc]

## Unicode

Multi-byte unicode characters work successfully on Mac OS X. On Windows,
however, this may be problematic (as is UTF in general on Windows). Other
platforms have not been tested.

## License

The code herein was not written by me, but was compiled from two separate open
source packages. Unix portions were imported from [gopass][gopass], while
Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
[Windows terminal helpers][cf-ui-windows].

The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
from the source (though I attempted to fill in the correct owner in the
boilerplate copyright notice).

[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
[gopass]: https://code.google.com/p/gopass "gopass"

@@ -0,0 +1,18 @@
package main

import (
	"fmt"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	password, err := speakeasy.Ask("Please enter a password: ")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("Password result: %q\n", password)
	fmt.Printf("Password len: %d\n", len(password))
}

@@ -0,0 +1,47 @@
package speakeasy

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Ask the user to enter a password with input hidden. prompt is a string to
// display before the user's input. Returns the provided password, or an error
// if the command failed.
func Ask(prompt string) (password string, err error) {
	return FAsk(os.Stdout, prompt)
}

// Same as the Ask function, except it is possible to specify the file to write
// the prompt to.
func FAsk(file *os.File, prompt string) (password string, err error) {
	if prompt != "" {
		fmt.Fprint(file, prompt) // Display the prompt.
	}
	password, err = getPassword()

	// Carriage return after the user input.
	fmt.Fprintln(file, "")
	return
}

func readline() (value string, err error) {
	var valb []byte
	var n int
	b := make([]byte, 1)
	for {
		// read one byte at a time so we don't accidentally read extra bytes
		n, err = os.Stdin.Read(b)
		if err != nil && err != io.EOF {
			return "", err
		}
		if n == 0 || b[0] == '\n' {
			break
		}
		valb = append(valb, b[0])
	}

	return strings.TrimSuffix(string(valb), "\r"), nil
}

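Ask always writes its prompt to stdout; FAsk exists so the prompt can go elsewhere, which matters when stdout is being captured or piped. A short sketch prompting on stderr instead:

```go
package main

import (
	"fmt"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Prompt on stderr so stdout stays clean for piped output.
	secret, err := speakeasy.FAsk(os.Stderr, "Token: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(len(secret)) // only the length goes to stdout
}
```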
@ -0,0 +1,93 @@
|
|||
// based on https://code.google.com/p/gopass
|
||||
// Author: johnsiilver@gmail.com (John Doak)
|
||||
//
|
||||
// Original code is based on code by RogerV in the golang-nuts thread:
|
||||
// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247
|
||||
|
||||
// +build darwin freebsd linux netbsd openbsd solaris
|
||||
|
||||
package speakeasy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const sttyArg0 = "/bin/stty"
|
||||
|
||||
var (
|
||||
sttyArgvEOff = []string{"stty", "-echo"}
|
||||
sttyArgvEOn = []string{"stty", "echo"}
|
||||
)
|
||||
|
||||
// getPassword gets input hidden from the terminal from a user. This is
|
||||
// accomplished by turning off terminal echo, reading input from the user and
|
||||
// finally turning on terminal echo.
|
||||
func getPassword() (password string, err error) {
|
||||
sig := make(chan os.Signal, 10)
|
||||
brk := make(chan bool)
|
||||
|
||||
// File descriptors for stdin, stdout, and stderr.
|
||||
fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}
|
||||
|
||||
// Setup notifications of termination signals to channel sig, create a process to
|
||||
// watch for these signals so we can turn back on echo if need be.
|
||||
signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
|
||||
syscall.SIGTERM)
|
||||
go catchSignal(fd, sig, brk)
|
||||
|
||||
// Turn off the terminal echo.
|
||||
pid, err := echoOff(fd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Turn on the terminal echo and stop listening for signals.
|
||||
defer signal.Stop(sig)
|
||||
defer close(brk)
|
||||
defer echoOn(fd)
|
||||
|
||||
syscall.Wait4(pid, nil, 0, nil)
|
||||
|
||||
line, err := readline()
|
||||
if err == nil {
|
||||
password = strings.TrimSpace(line)
|
||||
} else {
|
||||
err = fmt.Errorf("failed during password entry: %s", err)
|
||||
}
|
||||
|
||||
return password, err
|
||||
}
|
||||
|
||||
// echoOff turns off the terminal echo.
|
||||
func echoOff(fd []uintptr) (int, error) {
|
||||
pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
|
||||
}
|
||||
return pid, nil
|
||||
}
|
||||
|
||||
// echoOn turns back on the terminal echo.
|
||||
func echoOn(fd []uintptr) {
|
||||
// Turn on the terminal echo.
|
||||
pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
|
||||
if e == nil {
|
||||
syscall.Wait4(pid, nil, 0, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn
|
||||
// terminal echo back on before the program ends. Otherwise the user is left
|
||||
// with echo off on their terminal.
|
||||
func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
|
||||
select {
|
||||
case <-sig:
|
||||
echoOn(fd)
|
||||
os.Exit(-1)
|
||||
case <-brk:
|
||||
}
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
// +build windows
|
||||
|
||||
package speakeasy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT:
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||
const ENABLE_ECHO_INPUT = 0x0004
|
||||
|
||||
func getPassword() (password string, err error) {
|
||||
hStdin := syscall.Handle(os.Stdin.Fd())
|
||||
var oldMode uint32
|
||||
|
||||
err = syscall.GetConsoleMode(hStdin, &oldMode)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
|
||||
|
||||
err = setConsoleMode(hStdin, newMode)
|
||||
defer setConsoleMode(hStdin, oldMode)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return readline()
|
||||
}
|
||||
|
||||
func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
|
||||
dll := syscall.MustLoadDLL("kernel32")
|
||||
proc := dll.MustFindProc("SetConsoleMode")
|
||||
r, _, err := proc.Call(uintptr(console), uintptr(mode))
|
||||
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,6 @@
language: go

go:
  - 1.3
  - 1.4
  - tip

@@ -0,0 +1,23 @@
# Contributing

If you submit a pull request, please keep the following guidelines in mind:

1. Code should be `go fmt` compliant.
2. Types, structs and funcs should be documented.
3. Tests pass.

## Getting set up

Assuming your `$GOPATH` is set up according to your desires, run:

```sh
go get github.com/digitalocean/godo
```

## Running tests

When working on code in this repository, tests can be run via:

```sh
go test .
```

@ -0,0 +1,55 @@
|
|||
Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved.
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
======================
|
||||
Portions of the client are based on code at:
|
||||
https://github.com/google/go-github/
|
||||
|
||||
Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,136 @@
[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo)

# Godo

Godo is a Go client library for accessing the DigitalOcean V2 API.

You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo)

You can view DigitalOcean API docs here: [https://developers.digitalocean.com/documentation/v2/](https://developers.digitalocean.com/documentation/v2/)

## Usage

```go
import "github.com/digitalocean/godo"
```

Create a new DigitalOcean client, then use the exposed services to
access different parts of the DigitalOcean API.

### Authentication

Currently, Personal Access Token (PAT) is the only method of
authenticating with the API. You can manage your tokens
at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications).

You can then use your token to create a new client:

```go
import "golang.org/x/oauth2"

pat := "mytoken"
type TokenSource struct {
    AccessToken string
}

func (t *TokenSource) Token() (*oauth2.Token, error) {
    token := &oauth2.Token{
        AccessToken: t.AccessToken,
    }
    return token, nil
}

tokenSource := &TokenSource{
    AccessToken: pat,
}
oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
client := godo.NewClient(oauthClient)
```

## Examples

To create a new Droplet:

```go
dropletName := "super-cool-droplet"

createRequest := &godo.DropletCreateRequest{
    Name:   dropletName,
    Region: "nyc3",
    Size:   "512mb",
    Image: godo.DropletCreateImage{
        Slug: "ubuntu-14-04-x64",
    },
}

newDroplet, _, err := client.Droplets.Create(createRequest)

if err != nil {
    fmt.Printf("Something bad happened: %s\n\n", err)
    return err
}
```

### Pagination

If a list of items is paginated by the API, you must request pages individually. For example, to fetch all Droplets:

```go
func DropletList(client *godo.Client) ([]godo.Droplet, error) {
    // create a list to hold our droplets
    list := []godo.Droplet{}

    // create options. initially, these will be blank
    opt := &godo.ListOptions{}
    for {
        droplets, resp, err := client.Droplets.List(opt)
        if err != nil {
            return nil, err
        }

        // append the current page's droplets to our list
        for _, d := range droplets {
            list = append(list, d)
        }

        // if we are at the last page, break out of the for loop
        if resp.Links == nil || resp.Links.IsLastPage() {
            break
        }

        page, err := resp.Links.CurrentPage()
        if err != nil {
            return nil, err
        }

        // set the page we want for the next request
        opt.Page = page + 1
    }

    return list, nil
}
```

## Versioning

Each version of the client is tagged and the version is updated accordingly.

Since Go does not have built-in versioning, a package management tool is
recommended - a good one that works with git tags is
[gopkg.in](http://labix.org/gopkg.in).

To see the list of past versions, run `git tag`.

## Documentation

For a comprehensive list of examples, check out the [API documentation](https://developers.digitalocean.com/documentation/v2/).

For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation.

## Contributing

We love pull requests! Please see the [contribution guidelines](CONTRIBUTING.md).
|
@@ -0,0 +1,53 @@
|
|||
package godo
|
||||
|
||||
// AccountService is an interface for interfacing with the Account
|
||||
// endpoints of the DigitalOcean API
|
||||
// See: https://developers.digitalocean.com/documentation/v2/#account
|
||||
type AccountService interface {
|
||||
Get() (*Account, *Response, error)
|
||||
}
|
||||
|
||||
// AccountServiceOp handles communication with the Account related methods of
|
||||
// the DigitalOcean API.
|
||||
type AccountServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ AccountService = &AccountServiceOp{}
|
||||
|
||||
// Account represents a DigitalOcean Account
|
||||
type Account struct {
|
||||
DropletLimit int `json:"droplet_limit,omitempty"`
|
||||
FloatingIPLimit int `json:"floating_ip_limit,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
UUID string `json:"uuid,omitempty"`
|
||||
EmailVerified bool `json:"email_verified,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
StatusMessage string `json:"status_message,omitempty"`
|
||||
}
|
||||
|
||||
type accountRoot struct {
|
||||
Account *Account `json:"account"`
|
||||
}
|
||||
|
||||
func (r Account) String() string {
|
||||
return Stringify(r)
|
||||
}
|
||||
|
||||
// Get DigitalOcean account info
|
||||
func (s *AccountServiceOp) Get() (*Account, *Response, error) {
|
||||
path := "v2/account"
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(accountRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return root.Account, resp, err
|
||||
}
|
|
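The account service above only exposes a single `Get` call. The following is a minimal, self-contained usage sketch, not part of the vendored code: the token string is a placeholder and the program layout is an assumption.

```go
package main

import (
    "fmt"
    "log"

    "github.com/digitalocean/godo"
    "golang.org/x/oauth2"
)

func main() {
    // "my-token" is a placeholder personal access token.
    ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-token"})
    client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

    // Get performs GET v2/account and unwraps the "account" JSON root.
    account, _, err := client.Account.Get()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(account.Email, account.Status, account.DropletLimit)
}
```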
@@ -0,0 +1,100 @@
|
|||
package godo
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
actionsBasePath = "v2/actions"
|
||||
|
||||
// ActionInProgress is an in progress action status
|
||||
ActionInProgress = "in-progress"
|
||||
|
||||
// ActionCompleted is a completed action status
|
||||
ActionCompleted = "completed"
|
||||
)
|
||||
|
||||
// ActionsService handles communication with the action related methods of the
|
||||
// DigitalOcean API: https://developers.digitalocean.com/documentation/v2#actions
|
||||
type ActionsService interface {
|
||||
List(*ListOptions) ([]Action, *Response, error)
|
||||
Get(int) (*Action, *Response, error)
|
||||
}
|
||||
|
||||
// ActionsServiceOp handles communication with the action related methods of the
|
||||
// DigitalOcean API.
|
||||
type ActionsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ ActionsService = &ActionsServiceOp{}
|
||||
|
||||
type actionsRoot struct {
|
||||
Actions []Action `json:"actions"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
type actionRoot struct {
|
||||
Event Action `json:"action"`
|
||||
}
|
||||
|
||||
// Action represents a DigitalOcean Action
|
||||
type Action struct {
|
||||
ID int `json:"id"`
|
||||
Status string `json:"status"`
|
||||
Type string `json:"type"`
|
||||
StartedAt *Timestamp `json:"started_at"`
|
||||
CompletedAt *Timestamp `json:"completed_at"`
|
||||
ResourceID int `json:"resource_id"`
|
||||
ResourceType string `json:"resource_type"`
|
||||
Region *Region `json:"region,omitempty"`
|
||||
RegionSlug string `json:"region_slug,omitempty"`
|
||||
}
|
||||
|
||||
// List all actions
|
||||
func (s *ActionsServiceOp) List(opt *ListOptions) ([]Action, *Response, error) {
|
||||
path := actionsBasePath
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Actions, resp, err
|
||||
}
|
||||
|
||||
// Get an action by ID.
|
||||
func (s *ActionsServiceOp) Get(id int) (*Action, *Response, error) {
|
||||
if id < 1 {
|
||||
return nil, nil, NewArgError("id", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d", actionsBasePath, id)
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return &root.Event, resp, err
|
||||
}
|
||||
|
||||
func (a Action) String() string {
|
||||
return Stringify(a)
|
||||
}
|
|
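A short sketch of how this actions service might be consumed; `client` is assumed to be an already configured `*godo.Client`, and the helper name is made up for illustration.

```go
// listRecentActions assumes the imports "fmt" and "github.com/digitalocean/godo".
func listRecentActions(client *godo.Client) error {
    actions, _, err := client.Actions.List(&godo.ListOptions{Page: 1, PerPage: 25})
    if err != nil {
        return err
    }
    for _, a := range actions {
        // Status stays godo.ActionInProgress until the action finishes.
        fmt.Printf("action %d (%s): %s\n", a.ID, a.Type, a.Status)
    }
    return nil
}
```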
@@ -0,0 +1,2 @@
|
|||
// Package godo is the DigitalOcean API v2 client for Go
|
||||
package godo
|
|
@@ -0,0 +1,323 @@
|
|||
package godo
|
||||
|
||||
import "fmt"
|
||||
|
||||
const domainsBasePath = "v2/domains"
|
||||
|
||||
// DomainsService is an interface for managing DNS with the DigitalOcean API.
|
||||
// See: https://developers.digitalocean.com/documentation/v2#domains and
|
||||
// https://developers.digitalocean.com/documentation/v2#domain-records
|
||||
type DomainsService interface {
|
||||
List(*ListOptions) ([]Domain, *Response, error)
|
||||
Get(string) (*Domain, *Response, error)
|
||||
Create(*DomainCreateRequest) (*Domain, *Response, error)
|
||||
Delete(string) (*Response, error)
|
||||
|
||||
Records(string, *ListOptions) ([]DomainRecord, *Response, error)
|
||||
Record(string, int) (*DomainRecord, *Response, error)
|
||||
DeleteRecord(string, int) (*Response, error)
|
||||
EditRecord(string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error)
|
||||
CreateRecord(string, *DomainRecordEditRequest) (*DomainRecord, *Response, error)
|
||||
}
|
||||
|
||||
// DomainsServiceOp handles communication with the domain related methods of the
|
||||
// DigitalOcean API.
|
||||
type DomainsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ DomainsService = &DomainsServiceOp{}
|
||||
|
||||
// Domain represents a DigitalOcean domain
|
||||
type Domain struct {
|
||||
Name string `json:"name"`
|
||||
TTL int `json:"ttl"`
|
||||
ZoneFile string `json:"zone_file"`
|
||||
}
|
||||
|
||||
// domainRoot represents a response from the DigitalOcean API
|
||||
type domainRoot struct {
|
||||
Domain *Domain `json:"domain"`
|
||||
}
|
||||
|
||||
type domainsRoot struct {
|
||||
Domains []Domain `json:"domains"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
// DomainCreateRequest represents a request to create a domain.
|
||||
type DomainCreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
IPAddress string `json:"ip_address"`
|
||||
}
|
||||
|
||||
// DomainRecordRoot is the root of an individual Domain Record response
|
||||
type domainRecordRoot struct {
|
||||
DomainRecord *DomainRecord `json:"domain_record"`
|
||||
}
|
||||
|
||||
// DomainRecordsRoot is the root of a group of Domain Record responses
|
||||
type domainRecordsRoot struct {
|
||||
DomainRecords []DomainRecord `json:"domain_records"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
// DomainRecord represents a DigitalOcean DomainRecord
|
||||
type DomainRecord struct {
|
||||
ID int `json:"id,float64,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Data string `json:"data,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
Port int `json:"port,omitempty"`
|
||||
Weight int `json:"weight,omitempty"`
|
||||
}
|
||||
|
||||
// DomainRecordEditRequest represents a request to update a domain record.
|
||||
type DomainRecordEditRequest struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Data string `json:"data,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
Port int `json:"port,omitempty"`
|
||||
Weight int `json:"weight,omitempty"`
|
||||
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
// List all domains.
|
||||
func (s DomainsServiceOp) List(opt *ListOptions) ([]Domain, *Response, error) {
|
||||
path := domainsBasePath
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(domainsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Domains, resp, err
|
||||
}
|
||||
|
||||
// Get individual domain. It requires a non-empty domain name.
|
||||
func (s *DomainsServiceOp) Get(name string) (*Domain, *Response, error) {
|
||||
if len(name) < 1 {
|
||||
return nil, nil, NewArgError("name", "cannot be an empty string")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s", domainsBasePath, name)
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(domainRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return root.Domain, resp, err
|
||||
}
|
||||
|
||||
// Create a new domain
|
||||
func (s *DomainsServiceOp) Create(createRequest *DomainCreateRequest) (*Domain, *Response, error) {
|
||||
if createRequest == nil {
|
||||
return nil, nil, NewArgError("createRequest", "cannot be nil")
|
||||
}
|
||||
|
||||
path := domainsBasePath
|
||||
|
||||
req, err := s.client.NewRequest("POST", path, createRequest)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(domainRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return root.Domain, resp, err
|
||||
}
|
||||
|
||||
// Delete domain
|
||||
func (s *DomainsServiceOp) Delete(name string) (*Response, error) {
|
||||
if len(name) < 1 {
|
||||
return nil, NewArgError("name", "cannot be an empty string")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s", domainsBasePath, name)
|
||||
|
||||
req, err := s.client.NewRequest("DELETE", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(req, nil)
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Converts a DomainRecord to a string.
|
||||
func (d DomainRecord) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
// Converts a DomainRecordEditRequest to a string.
|
||||
func (d DomainRecordEditRequest) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
// Records returns a slice of DomainRecords for a domain
|
||||
func (s *DomainsServiceOp) Records(domain string, opt *ListOptions) ([]DomainRecord, *Response, error) {
|
||||
if len(domain) < 1 {
|
||||
return nil, nil, NewArgError("domain", "cannot be an empty string")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(domainRecordsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.DomainRecords, resp, err
|
||||
}
|
||||
|
||||
// Record returns the record id from a domain
|
||||
func (s *DomainsServiceOp) Record(domain string, id int) (*DomainRecord, *Response, error) {
|
||||
if len(domain) < 1 {
|
||||
return nil, nil, NewArgError("domain", "cannot be an empty string")
|
||||
}
|
||||
|
||||
if id < 1 {
|
||||
return nil, nil, NewArgError("id", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
record := new(domainRecordRoot)
|
||||
resp, err := s.client.Do(req, record)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return record.DomainRecord, resp, err
|
||||
}
|
||||
|
||||
// DeleteRecord deletes a record from a domain identified by id
|
||||
func (s *DomainsServiceOp) DeleteRecord(domain string, id int) (*Response, error) {
|
||||
if len(domain) < 1 {
|
||||
return nil, NewArgError("domain", "cannot be an empty string")
|
||||
}
|
||||
|
||||
if id < 1 {
|
||||
return nil, NewArgError("id", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
|
||||
|
||||
req, err := s.client.NewRequest("DELETE", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(req, nil)
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// EditRecord edits a record using a DomainRecordEditRequest
|
||||
func (s *DomainsServiceOp) EditRecord(
|
||||
domain string,
|
||||
id int,
|
||||
editRequest *DomainRecordEditRequest,
|
||||
) (*DomainRecord, *Response, error) {
|
||||
if len(domain) < 1 {
|
||||
return nil, nil, NewArgError("domain", "cannot be an empty string")
|
||||
}
|
||||
|
||||
if id < 1 {
|
||||
return nil, nil, NewArgError("id", "cannot be less than 1")
|
||||
}
|
||||
|
||||
if editRequest == nil {
|
||||
return nil, nil, NewArgError("editRequest", "cannot be nil")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
|
||||
|
||||
req, err := s.client.NewRequest("PUT", path, editRequest)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
d := new(DomainRecord)
|
||||
resp, err := s.client.Do(req, d)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return d, resp, err
|
||||
}
|
||||
|
||||
// CreateRecord creates a record using a DomainRecordEditRequest
|
||||
func (s *DomainsServiceOp) CreateRecord(
|
||||
domain string,
|
||||
createRequest *DomainRecordEditRequest) (*DomainRecord, *Response, error) {
|
||||
if len(domain) < 1 {
|
||||
return nil, nil, NewArgError("domain", "cannot be empty string")
|
||||
}
|
||||
|
||||
if createRequest == nil {
|
||||
return nil, nil, NewArgError("createRequest", "cannot be nil")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain)
|
||||
req, err := s.client.NewRequest("POST", path, createRequest)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
d := new(domainRecordRoot)
|
||||
resp, err := s.client.Do(req, d)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return d.DomainRecord, resp, err
|
||||
}
|
|
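A rough sketch of chaining the calls above to create a domain and then an A record for it; `client`, the domain name, the address, and the helper name are placeholders, not values from this diff.

```go
// createWWWRecord assumes the import "github.com/digitalocean/godo".
func createWWWRecord(client *godo.Client) (*godo.DomainRecord, error) {
    domain, _, err := client.Domains.Create(&godo.DomainCreateRequest{
        Name:      "example.com",
        IPAddress: "203.0.113.10",
    })
    if err != nil {
        return nil, err
    }

    // Point www at the same address via a plain A record.
    record, _, err := client.Domains.CreateRecord(domain.Name, &godo.DomainRecordEditRequest{
        Type: "A",
        Name: "www",
        Data: "203.0.113.10",
    })
    if err != nil {
        return nil, err
    }
    return record, nil
}
```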
@@ -0,0 +1,238 @@
|
|||
package godo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// ActionRequest represents a DigitalOcean Action Request
|
||||
type ActionRequest map[string]interface{}
|
||||
|
||||
// DropletActionsService is an interface for interfacing with the droplet actions
|
||||
// endpoints of the DigitalOcean API
|
||||
// See: https://developers.digitalocean.com/documentation/v2#droplet-actions
|
||||
type DropletActionsService interface {
|
||||
Shutdown(int) (*Action, *Response, error)
|
||||
PowerOff(int) (*Action, *Response, error)
|
||||
PowerOn(int) (*Action, *Response, error)
|
||||
PowerCycle(int) (*Action, *Response, error)
|
||||
Reboot(int) (*Action, *Response, error)
|
||||
Restore(int, int) (*Action, *Response, error)
|
||||
Resize(int, string, bool) (*Action, *Response, error)
|
||||
Rename(int, string) (*Action, *Response, error)
|
||||
Snapshot(int, string) (*Action, *Response, error)
|
||||
EnableBackups(int) (*Action, *Response, error)
|
||||
DisableBackups(int) (*Action, *Response, error)
|
||||
PasswordReset(int) (*Action, *Response, error)
|
||||
RebuildByImageID(int, int) (*Action, *Response, error)
|
||||
RebuildByImageSlug(int, string) (*Action, *Response, error)
|
||||
ChangeKernel(int, int) (*Action, *Response, error)
|
||||
EnableIPv6(int) (*Action, *Response, error)
|
||||
EnablePrivateNetworking(int) (*Action, *Response, error)
|
||||
Upgrade(int) (*Action, *Response, error)
|
||||
Get(int, int) (*Action, *Response, error)
|
||||
GetByURI(string) (*Action, *Response, error)
|
||||
}
|
||||
|
||||
// DropletActionsServiceOp handles communication with the droplet action related
|
||||
// methods of the DigitalOcean API.
|
||||
type DropletActionsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ DropletActionsService = &DropletActionsServiceOp{}
|
||||
|
||||
// Shutdown a Droplet
|
||||
func (s *DropletActionsServiceOp) Shutdown(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "shutdown"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// PowerOff a Droplet
|
||||
func (s *DropletActionsServiceOp) PowerOff(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "power_off"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// PowerOn a Droplet
|
||||
func (s *DropletActionsServiceOp) PowerOn(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "power_on"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// PowerCycle a Droplet
|
||||
func (s *DropletActionsServiceOp) PowerCycle(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "power_cycle"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Reboot a Droplet
|
||||
func (s *DropletActionsServiceOp) Reboot(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "reboot"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Restore an image to a Droplet
|
||||
func (s *DropletActionsServiceOp) Restore(id, imageID int) (*Action, *Response, error) {
|
||||
requestType := "restore"
|
||||
request := &ActionRequest{
|
||||
"type": requestType,
|
||||
"image": float64(imageID),
|
||||
}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Resize a Droplet
|
||||
func (s *DropletActionsServiceOp) Resize(id int, sizeSlug string, resizeDisk bool) (*Action, *Response, error) {
|
||||
requestType := "resize"
|
||||
request := &ActionRequest{
|
||||
"type": requestType,
|
||||
"size": sizeSlug,
|
||||
"disk": resizeDisk,
|
||||
}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Rename a Droplet
|
||||
func (s *DropletActionsServiceOp) Rename(id int, name string) (*Action, *Response, error) {
|
||||
requestType := "rename"
|
||||
request := &ActionRequest{
|
||||
"type": requestType,
|
||||
"name": name,
|
||||
}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Snapshot a Droplet.
|
||||
func (s *DropletActionsServiceOp) Snapshot(id int, name string) (*Action, *Response, error) {
|
||||
requestType := "snapshot"
|
||||
request := &ActionRequest{
|
||||
"type": requestType,
|
||||
"name": name,
|
||||
}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// EnableBackups enables backups for a droplet.
|
||||
func (s *DropletActionsServiceOp) EnableBackups(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "enable_backups"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// DisableBackups disables backups for a droplet.
|
||||
func (s *DropletActionsServiceOp) DisableBackups(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "disable_backups"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// PasswordReset resets the password for a droplet.
|
||||
func (s *DropletActionsServiceOp) PasswordReset(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "password_reset"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// RebuildByImageID rebuilds a droplet from an image with a given id.
|
||||
func (s *DropletActionsServiceOp) RebuildByImageID(id, imageID int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "rebuild", "image": imageID}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// RebuildByImageSlug rebuilds a droplet from an image with a given slug.
|
||||
func (s *DropletActionsServiceOp) RebuildByImageSlug(id int, slug string) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "rebuild", "image": slug}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// ChangeKernel changes the kernel for a droplet.
|
||||
func (s *DropletActionsServiceOp) ChangeKernel(id, kernelID int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "change_kernel", "kernel": kernelID}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// EnableIPv6 enables IPv6 for a droplet.
|
||||
func (s *DropletActionsServiceOp) EnableIPv6(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "enable_ipv6"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// EnablePrivateNetworking enables private networking for a droplet.
|
||||
func (s *DropletActionsServiceOp) EnablePrivateNetworking(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "enable_private_networking"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
// Upgrade a droplet.
|
||||
func (s *DropletActionsServiceOp) Upgrade(id int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "upgrade"}
|
||||
return s.doAction(id, request)
|
||||
}
|
||||
|
||||
func (s *DropletActionsServiceOp) doAction(id int, request *ActionRequest) (*Action, *Response, error) {
|
||||
if id < 1 {
|
||||
return nil, nil, NewArgError("id", "cannot be less than 1")
|
||||
}
|
||||
|
||||
if request == nil {
|
||||
return nil, nil, NewArgError("request", "request can't be nil")
|
||||
}
|
||||
|
||||
path := dropletActionPath(id)
|
||||
|
||||
req, err := s.client.NewRequest("POST", path, request)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return &root.Event, resp, err
|
||||
}
|
||||
|
||||
// Get an action for a particular droplet by id.
|
||||
func (s *DropletActionsServiceOp) Get(dropletID, actionID int) (*Action, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
if actionID < 1 {
|
||||
return nil, nil, NewArgError("actionID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d", dropletActionPath(dropletID), actionID)
|
||||
return s.get(path)
|
||||
}
|
||||
|
||||
// GetByURI gets an action for a particular droplet by id.
|
||||
func (s *DropletActionsServiceOp) GetByURI(rawurl string) (*Action, *Response, error) {
|
||||
u, err := url.Parse(rawurl)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return s.get(u.Path)
|
||||
|
||||
}
|
||||
|
||||
func (s *DropletActionsServiceOp) get(path string) (*Action, *Response, error) {
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return &root.Event, resp, err
|
||||
|
||||
}
|
||||
|
||||
func dropletActionPath(dropletID int) string {
|
||||
return fmt.Sprintf("v2/droplets/%d/actions", dropletID)
|
||||
}
|
|
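Droplet actions are asynchronous: each call returns an `Action` immediately and its `Status` has to be polled. Below is a sketch under the assumption that `client` is a configured `*godo.Client` and `dropletID` refers to an existing droplet; the polling interval and helper name are arbitrary.

```go
// powerCycleAndWait assumes the imports "time" and "github.com/digitalocean/godo".
func powerCycleAndWait(client *godo.Client, dropletID int) (*godo.Action, error) {
    // This only enqueues the power cycle on the API side.
    action, _, err := client.DropletActions.PowerCycle(dropletID)
    if err != nil {
        return nil, err
    }

    // Re-fetch the action until it is no longer in progress.
    for action.Status == godo.ActionInProgress {
        time.Sleep(5 * time.Second)
        action, _, err = client.DropletActions.Get(dropletID, action.ID)
        if err != nil {
            return nil, err
        }
    }
    return action, nil
}
```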
@@ -0,0 +1,495 @@
|
|||
package godo
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const dropletBasePath = "v2/droplets"
|
||||
|
||||
var errNoNetworks = errors.New("no networks have been defined")
|
||||
|
||||
// DropletsService is an interface for interfacing with the droplet
|
||||
// endpoints of the DigitalOcean API
|
||||
// See: https://developers.digitalocean.com/documentation/v2#droplets
|
||||
type DropletsService interface {
|
||||
List(*ListOptions) ([]Droplet, *Response, error)
|
||||
Get(int) (*Droplet, *Response, error)
|
||||
Create(*DropletCreateRequest) (*Droplet, *Response, error)
|
||||
CreateMultiple(*DropletMultiCreateRequest) ([]Droplet, *Response, error)
|
||||
Delete(int) (*Response, error)
|
||||
Kernels(int, *ListOptions) ([]Kernel, *Response, error)
|
||||
Snapshots(int, *ListOptions) ([]Image, *Response, error)
|
||||
Backups(int, *ListOptions) ([]Image, *Response, error)
|
||||
Actions(int, *ListOptions) ([]Action, *Response, error)
|
||||
Neighbors(int) ([]Droplet, *Response, error)
|
||||
}
|
||||
|
||||
// DropletsServiceOp handles communication with the droplet related methods of the
|
||||
// DigitalOcean API.
|
||||
type DropletsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ DropletsService = &DropletsServiceOp{}
|
||||
|
||||
// Droplet represents a DigitalOcean Droplet
|
||||
type Droplet struct {
|
||||
ID int `json:"id,float64,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Memory int `json:"memory,omitempty"`
|
||||
Vcpus int `json:"vcpus,omitempty"`
|
||||
Disk int `json:"disk,omitempty"`
|
||||
Region *Region `json:"region,omitempty"`
|
||||
Image *Image `json:"image,omitempty"`
|
||||
Size *Size `json:"size,omitempty"`
|
||||
SizeSlug string `json:"size_slug,omitempty"`
|
||||
BackupIDs []int `json:"backup_ids,omitempty"`
|
||||
SnapshotIDs []int `json:"snapshot_ids,omitempty"`
|
||||
Locked bool `json:"locked,bool,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Networks *Networks `json:"networks,omitempty"`
|
||||
ActionIDs []int `json:"action_ids,omitempty"`
|
||||
Created string `json:"created_at,omitempty"`
|
||||
Kernel *Kernel `json:"kernel,omitempty"`
|
||||
}
|
||||
|
||||
// PublicIPv4 returns the public IPv4 address for the Droplet.
|
||||
func (d *Droplet) PublicIPv4() (string, error) {
|
||||
if d.Networks == nil {
|
||||
return "", errNoNetworks
|
||||
}
|
||||
|
||||
for _, v4 := range d.Networks.V4 {
|
||||
if v4.Type == "public" {
|
||||
return v4.IPAddress, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// PrivateIPv4 returns the private IPv4 address for the Droplet.
|
||||
func (d *Droplet) PrivateIPv4() (string, error) {
|
||||
if d.Networks == nil {
|
||||
return "", errNoNetworks
|
||||
}
|
||||
|
||||
for _, v4 := range d.Networks.V4 {
|
||||
if v4.Type == "private" {
|
||||
return v4.IPAddress, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// PublicIPv6 returns the public IPv6 address for the Droplet.
|
||||
func (d *Droplet) PublicIPv6() (string, error) {
|
||||
if d.Networks == nil {
|
||||
return "", errNoNetworks
|
||||
}
|
||||
|
||||
for _, v6 := range d.Networks.V6 {
|
||||
if v4.Type == "public" {
|
||||
return v6.IPAddress, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Kernel object
|
||||
type Kernel struct {
|
||||
ID int `json:"id,float64,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
// Convert Droplet to a string
|
||||
func (d Droplet) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
// DropletRoot represents a Droplet root
|
||||
type dropletRoot struct {
|
||||
Droplet *Droplet `json:"droplet"`
|
||||
Links *Links `json:"links,omitempty"`
|
||||
}
|
||||
|
||||
type dropletsRoot struct {
|
||||
Droplets []Droplet `json:"droplets"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
type kernelsRoot struct {
|
||||
Kernels []Kernel `json:"kernels,omitempty"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
type snapshotsRoot struct {
|
||||
Snapshots []Image `json:"snapshots,omitempty"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
type backupsRoot struct {
|
||||
Backups []Image `json:"backups,omitempty"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
// DropletCreateImage identifies an image for the create request. It prefers slug over ID.
|
||||
type DropletCreateImage struct {
|
||||
ID int
|
||||
Slug string
|
||||
}
|
||||
|
||||
// MarshalJSON returns either the slug or id of the image. It returns the id
|
||||
// if the slug is empty.
|
||||
func (d DropletCreateImage) MarshalJSON() ([]byte, error) {
|
||||
if d.Slug != "" {
|
||||
return json.Marshal(d.Slug)
|
||||
}
|
||||
|
||||
return json.Marshal(d.ID)
|
||||
}
|
||||
|
||||
// DropletCreateSSHKey identifies a SSH Key for the create request. It prefers fingerprint over ID.
|
||||
type DropletCreateSSHKey struct {
|
||||
ID int
|
||||
Fingerprint string
|
||||
}
|
||||
|
||||
// MarshalJSON returns either the fingerprint or id of the ssh key. It returns
|
||||
// the id if the fingerprint is empty.
|
||||
func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) {
|
||||
if d.Fingerprint != "" {
|
||||
return json.Marshal(d.Fingerprint)
|
||||
}
|
||||
|
||||
return json.Marshal(d.ID)
|
||||
}
|
||||
|
||||
// DropletCreateRequest represents a request to create a droplet.
|
||||
type DropletCreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
Region string `json:"region"`
|
||||
Size string `json:"size"`
|
||||
Image DropletCreateImage `json:"image"`
|
||||
SSHKeys []DropletCreateSSHKey `json:"ssh_keys"`
|
||||
Backups bool `json:"backups"`
|
||||
IPv6 bool `json:"ipv6"`
|
||||
PrivateNetworking bool `json:"private_networking"`
|
||||
UserData string `json:"user_data,omitempty"`
|
||||
}
|
||||
|
||||
// DropletMultiCreateRequest is a request to create multiple droplets.
|
||||
type DropletMultiCreateRequest struct {
|
||||
Names []string `json:"names"`
|
||||
Region string `json:"region"`
|
||||
Size string `json:"size"`
|
||||
Image DropletCreateImage `json:"image"`
|
||||
SSHKeys []DropletCreateSSHKey `json:"ssh_keys"`
|
||||
Backups bool `json:"backups"`
|
||||
IPv6 bool `json:"ipv6"`
|
||||
PrivateNetworking bool `json:"private_networking"`
|
||||
UserData string `json:"user_data,omitempty"`
|
||||
}
|
||||
|
||||
func (d DropletCreateRequest) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
func (d DropletMultiCreateRequest) String() string {
|
||||
return Stringify(d)
|
||||
}
|
||||
|
||||
// Networks represents the droplet's networks
|
||||
type Networks struct {
|
||||
V4 []NetworkV4 `json:"v4,omitempty"`
|
||||
V6 []NetworkV6 `json:"v6,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkV4 represents a DigitalOcean IPv4 Network
|
||||
type NetworkV4 struct {
|
||||
IPAddress string `json:"ip_address,omitempty"`
|
||||
Netmask string `json:"netmask,omitempty"`
|
||||
Gateway string `json:"gateway,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
func (n NetworkV4) String() string {
|
||||
return Stringify(n)
|
||||
}
|
||||
|
||||
// NetworkV6 represents a DigitalOcean IPv6 network.
|
||||
type NetworkV6 struct {
|
||||
IPAddress string `json:"ip_address,omitempty"`
|
||||
Netmask int `json:"netmask,omitempty"`
|
||||
Gateway string `json:"gateway,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
func (n NetworkV6) String() string {
|
||||
return Stringify(n)
|
||||
}
|
||||
|
||||
// List all droplets
|
||||
func (s *DropletsServiceOp) List(opt *ListOptions) ([]Droplet, *Response, error) {
|
||||
path := dropletBasePath
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(dropletsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Droplets, resp, err
|
||||
}
|
||||
|
||||
// Get individual droplet
|
||||
func (s *DropletsServiceOp) Get(dropletID int) (*Droplet, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID)
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(dropletRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return root.Droplet, resp, err
|
||||
}
|
||||
|
||||
// Create droplet
|
||||
func (s *DropletsServiceOp) Create(createRequest *DropletCreateRequest) (*Droplet, *Response, error) {
|
||||
if createRequest == nil {
|
||||
return nil, nil, NewArgError("createRequest", "cannot be nil")
|
||||
}
|
||||
|
||||
path := dropletBasePath
|
||||
|
||||
req, err := s.client.NewRequest("POST", path, createRequest)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(dropletRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Droplet, resp, err
|
||||
}
|
||||
|
||||
// CreateMultiple creates multiple droplets.
|
||||
func (s *DropletsServiceOp) CreateMultiple(createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) {
|
||||
if createRequest == nil {
|
||||
return nil, nil, NewArgError("createRequest", "cannot be nil")
|
||||
}
|
||||
|
||||
path := dropletBasePath
|
||||
|
||||
req, err := s.client.NewRequest("POST", path, createRequest)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(dropletsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Droplets, resp, err
|
||||
}
|
||||
|
||||
// Delete droplet
|
||||
func (s *DropletsServiceOp) Delete(dropletID int) (*Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID)
|
||||
|
||||
req, err := s.client.NewRequest("DELETE", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(req, nil)
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Kernels lists kernels available for a droplet.
|
||||
func (s *DropletsServiceOp) Kernels(dropletID int, opt *ListOptions) ([]Kernel, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d/kernels", dropletBasePath, dropletID)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(kernelsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Kernels, resp, err
|
||||
}
|
||||
|
||||
// Actions lists the actions for a droplet.
|
||||
func (s *DropletsServiceOp) Actions(dropletID int, opt *ListOptions) ([]Action, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d/actions", dropletBasePath, dropletID)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Actions, resp, err
|
||||
}
|
||||
|
||||
// Backups lists the backups for a droplet.
|
||||
func (s *DropletsServiceOp) Backups(dropletID int, opt *ListOptions) ([]Image, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d/backups", dropletBasePath, dropletID)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(backupsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Backups, resp, err
|
||||
}
|
||||
|
||||
// Snapshots lists the snapshots available for a droplet.
|
||||
func (s *DropletsServiceOp) Snapshots(dropletID int, opt *ListOptions) ([]Image, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d/snapshots", dropletBasePath, dropletID)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(snapshotsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Snapshots, resp, err
|
||||
}
|
||||
|
||||
// Neighbors lists the neighbors for a droplet.
|
||||
func (s *DropletsServiceOp) Neighbors(dropletID int) ([]Droplet, *Response, error) {
|
||||
if dropletID < 1 {
|
||||
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("%s/%d/neighbors", dropletBasePath, dropletID)
|
||||
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(dropletsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return root.Droplets, resp, err
|
||||
}
|
||||
|
||||
func (s *DropletsServiceOp) dropletActionStatus(uri string) (string, error) {
|
||||
action, _, err := s.client.DropletActions.GetByURI(uri)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return action.Status, nil
|
||||
}
|
|
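As a sketch of the droplets service plus the network helpers defined above: `client` is an assumed, already configured `*godo.Client`, the helper name is invented, and the slugs simply mirror the README example.

```go
// createAndReport assumes the imports "fmt" and "github.com/digitalocean/godo".
func createAndReport(client *godo.Client) (*godo.Droplet, error) {
    droplet, _, err := client.Droplets.Create(&godo.DropletCreateRequest{
        Name:   "example-droplet",
        Region: "nyc3",
        Size:   "512mb",
        Image:  godo.DropletCreateImage{Slug: "ubuntu-14-04-x64"},
    })
    if err != nil {
        return nil, err
    }

    // Right after creation the Networks field may still be nil, in which case
    // PublicIPv4 returns an error; a real caller would poll Get until the
    // droplet becomes active before reading its address.
    if ip, err := droplet.PublicIPv4(); err == nil && ip != "" {
        fmt.Println("droplet", droplet.ID, "public IPv4:", ip)
    }
    return droplet, nil
}
```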
@@ -0,0 +1,24 @@
|
|||
package godo
|
||||
|
||||
import "fmt"
|
||||
|
||||
// ArgError is an error that represents an error with an input to godo. It
|
||||
// identifies the argument and the cause (if possible).
|
||||
type ArgError struct {
|
||||
arg string
|
||||
reason string
|
||||
}
|
||||
|
||||
var _ error = &ArgError{}
|
||||
|
||||
// NewArgError creates an InputError.
|
||||
func NewArgError(arg, reason string) *ArgError {
|
||||
return &ArgError{
|
||||
arg: arg,
|
||||
reason: reason,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ArgError) Error() string {
|
||||
return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason)
|
||||
}
|
|
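`ArgError` is what the services return for obviously invalid input, before any HTTP request is made, so it can be detected with a plain type assertion. A small sketch; `client` is an assumed configured `*godo.Client` and the helper name is hypothetical.

```go
// reportBadArgument assumes the imports "fmt" and "github.com/digitalocean/godo".
func reportBadArgument(client *godo.Client) {
    // Droplet IDs must be >= 1, so this fails locally with an *ArgError.
    _, _, err := client.Droplets.Get(0)
    if argErr, ok := err.(*godo.ArgError); ok {
        fmt.Println("invalid input:", argErr.Error())
    }
}
```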
@@ -0,0 +1,131 @@
|
|||
package godo
|
||||
|
||||
import "fmt"
|
||||
|
||||
const floatingBasePath = "v2/floating_ips"
|
||||
|
||||
// FloatingIPsService is an interface for interfacing with the floating IPs
|
||||
// endpoints of the DigitalOcean API.
|
||||
// See: https://developers.digitalocean.com/documentation/v2#floating-ips
|
||||
type FloatingIPsService interface {
|
||||
List(*ListOptions) ([]FloatingIP, *Response, error)
|
||||
Get(string) (*FloatingIP, *Response, error)
|
||||
Create(*FloatingIPCreateRequest) (*FloatingIP, *Response, error)
|
||||
Delete(string) (*Response, error)
|
||||
}
|
||||
|
||||
// FloatingIPsServiceOp handles communication with the floating IPs related methods of the
|
||||
// DigitalOcean API.
|
||||
type FloatingIPsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
var _ FloatingIPsService = &FloatingIPsServiceOp{}
|
||||
|
||||
// FloatingIP represents a DigitalOcean floating IP.
|
||||
type FloatingIP struct {
|
||||
Region *Region `json:"region"`
|
||||
Droplet *Droplet `json:"droplet"`
|
||||
IP string `json:"ip"`
|
||||
}
|
||||
|
||||
func (f FloatingIP) String() string {
|
||||
return Stringify(f)
|
||||
}
|
||||
|
||||
type floatingIPsRoot struct {
|
||||
FloatingIPs []FloatingIP `json:"floating_ips"`
|
||||
Links *Links `json:"links"`
|
||||
}
|
||||
|
||||
type floatingIPRoot struct {
|
||||
FloatingIP *FloatingIP `json:"floating_ip"`
|
||||
Links *Links `json:"links,omitempty"`
|
||||
}
|
||||
|
||||
// FloatingIPCreateRequest represents a request to create a floating IP.
|
||||
// If DropletID is not empty, the floating IP will be assigned to the
|
||||
// droplet.
|
||||
type FloatingIPCreateRequest struct {
|
||||
Region string `json:"region"`
|
||||
DropletID int `json:"droplet_id,omitempty"`
|
||||
}
|
||||
|
||||
// List all floating IPs.
|
||||
func (f *FloatingIPsServiceOp) List(opt *ListOptions) ([]FloatingIP, *Response, error) {
|
||||
path := floatingBasePath
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := f.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(floatingIPsRoot)
|
||||
resp, err := f.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.FloatingIPs, resp, err
|
||||
}
|
||||
|
||||
// Get an individual floating IP.
|
||||
func (f *FloatingIPsServiceOp) Get(ip string) (*FloatingIP, *Response, error) {
|
||||
path := fmt.Sprintf("%s/%s", floatingBasePath, ip)
|
||||
|
||||
req, err := f.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(floatingIPRoot)
|
||||
resp, err := f.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return root.FloatingIP, resp, err
|
||||
}
|
||||
|
||||
// Create a floating IP. If the DropletID field of the request is not empty,
|
||||
// the floating IP will also be assigned to the droplet.
|
||||
func (f *FloatingIPsServiceOp) Create(createRequest *FloatingIPCreateRequest) (*FloatingIP, *Response, error) {
|
||||
path := floatingBasePath
|
||||
|
||||
req, err := f.client.NewRequest("POST", path, createRequest)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(floatingIPRoot)
|
||||
resp, err := f.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.FloatingIP, resp, err
|
||||
}
|
||||
|
||||
// Delete a floating IP.
|
||||
func (f *FloatingIPsServiceOp) Delete(ip string) (*Response, error) {
|
||||
path := fmt.Sprintf("%s/%s", floatingBasePath, ip)
|
||||
|
||||
req, err := f.client.NewRequest("DELETE", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req, nil)
|
||||
|
||||
return resp, err
|
||||
}
|
|
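A minimal sketch of reserving a floating IP and assigning it at creation time; `client`, the region slug, the droplet ID, and the helper name are all assumptions.

```go
// reserveFloatingIP assumes the import "github.com/digitalocean/godo".
func reserveFloatingIP(client *godo.Client, dropletID int) (*godo.FloatingIP, error) {
    // Setting DropletID assigns the new IP to that droplet immediately.
    fip, _, err := client.FloatingIPs.Create(&godo.FloatingIPCreateRequest{
        Region:    "nyc3",
        DropletID: dropletID,
    })
    if err != nil {
        return nil, err
    }
    return fip, nil
}
```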
@@ -0,0 +1,105 @@
|
|||
package godo
|
||||
|
||||
import "fmt"
|
||||
|
||||
// FloatingIPActionsService is an interface for interfacing with the
|
||||
// floating IP actions endpoints of the DigitalOcean API.
|
||||
// See: https://developers.digitalocean.com/documentation/v2#floating-ips-action
|
||||
type FloatingIPActionsService interface {
|
||||
Assign(ip string, dropletID int) (*Action, *Response, error)
|
||||
Unassign(ip string) (*Action, *Response, error)
|
||||
Get(ip string, actionID int) (*Action, *Response, error)
|
||||
List(ip string, opt *ListOptions) ([]Action, *Response, error)
|
||||
}
|
||||
|
||||
// FloatingIPActionsServiceOp handles communication with the floating IPs
|
||||
// action related methods of the DigitalOcean API.
|
||||
type FloatingIPActionsServiceOp struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// Assign a floating IP to a droplet.
|
||||
func (s *FloatingIPActionsServiceOp) Assign(ip string, dropletID int) (*Action, *Response, error) {
|
||||
request := &ActionRequest{
|
||||
"type": "assign",
|
||||
"droplet_id": dropletID,
|
||||
}
|
||||
return s.doAction(ip, request)
|
||||
}
|
||||
|
||||
// Unassign a floating IP from the droplet it is currently assigned to.
|
||||
func (s *FloatingIPActionsServiceOp) Unassign(ip string) (*Action, *Response, error) {
|
||||
request := &ActionRequest{"type": "unassign"}
|
||||
return s.doAction(ip, request)
|
||||
}
|
||||
|
||||
// Get an action for a particular floating IP by id.
|
||||
func (s *FloatingIPActionsServiceOp) Get(ip string, actionID int) (*Action, *Response, error) {
|
||||
path := fmt.Sprintf("%s/%d", floatingIPActionPath(ip), actionID)
|
||||
return s.get(path)
|
||||
}
|
||||
|
||||
// List the actions for a particular floating IP.
|
||||
func (s *FloatingIPActionsServiceOp) List(ip string, opt *ListOptions) ([]Action, *Response, error) {
|
||||
path := floatingIPActionPath(ip)
|
||||
path, err := addOptions(path, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return s.list(path)
|
||||
}
|
||||
|
||||
func (s *FloatingIPActionsServiceOp) doAction(ip string, request *ActionRequest) (*Action, *Response, error) {
|
||||
path := floatingIPActionPath(ip)
|
||||
|
||||
req, err := s.client.NewRequest("POST", path, request)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return &root.Event, resp, err
|
||||
}
|
||||
|
||||
func (s *FloatingIPActionsServiceOp) get(path string) (*Action, *Response, error) {
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return &root.Event, resp, err
|
||||
}
|
||||
|
||||
func (s *FloatingIPActionsServiceOp) list(path string) ([]Action, *Response, error) {
|
||||
req, err := s.client.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
root := new(actionsRoot)
|
||||
resp, err := s.client.Do(req, root)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
if l := root.Links; l != nil {
|
||||
resp.Links = l
|
||||
}
|
||||
|
||||
return root.Actions, resp, err
|
||||
}
|
||||
|
||||
func floatingIPActionPath(ip string) string {
|
||||
return fmt.Sprintf("%s/%s/actions", floatingBasePath, ip)
|
||||
}
|
|
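A sketch of moving a floating IP between droplets with the action service above. In practice the unassign action should be polled to completion before reassigning; that step is omitted here for brevity, and `client`, `ip`, `dropletID`, and the helper name are assumptions.

```go
// reassignFloatingIP assumes the import "github.com/digitalocean/godo".
func reassignFloatingIP(client *godo.Client, ip string, dropletID int) (*godo.Action, error) {
    // Detach the IP from whatever droplet currently holds it.
    if _, _, err := client.FloatingIPActions.Unassign(ip); err != nil {
        return nil, err
    }

    // Attach it to the target droplet; the returned Action can be polled.
    action, _, err := client.FloatingIPActions.Assign(ip, dropletID)
    if err != nil {
        return nil, err
    }
    return action, nil
}
```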
@@ -0,0 +1,341 @@
|
|||
package godo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-querystring/query"
|
||||
headerLink "github.com/tent/http-link-go"
|
||||
)
|
||||
|
||||
const (
|
||||
libraryVersion = "0.1.0"
|
||||
defaultBaseURL = "https://api.digitalocean.com/"
|
||||
userAgent = "godo/" + libraryVersion
|
||||
mediaType = "application/json"
|
||||
|
||||
headerRateLimit = "RateLimit-Limit"
|
||||
headerRateRemaining = "RateLimit-Remaining"
|
||||
headerRateReset = "RateLimit-Reset"
|
||||
)
|
||||
|
||||
// Client manages communication with DigitalOcean V2 API.
|
||||
type Client struct {
|
||||
// HTTP client used to communicate with the DO API.
|
||||
client *http.Client
|
||||
|
||||
// Base URL for API requests.
|
||||
BaseURL *url.URL
|
||||
|
||||
// User agent for client
|
||||
UserAgent string
|
||||
|
||||
// Rate contains the current rate limit for the client as determined by the most recent
|
||||
// API call.
|
||||
Rate Rate
|
||||
|
||||
// Services used for communicating with the API
|
||||
Account AccountService
|
||||
Actions ActionsService
|
||||
Domains DomainsService
|
||||
Droplets DropletsService
|
||||
DropletActions DropletActionsService
|
||||
Images ImagesService
|
||||
ImageActions ImageActionsService
|
||||
Keys KeysService
|
||||
Regions RegionsService
|
||||
Sizes SizesService
|
||||
FloatingIPs FloatingIPsService
|
||||
FloatingIPActions FloatingIPActionsService
|
||||
|
||||
// Optional function called after every successful request made to the DO APIs
|
||||
onRequestCompleted RequestCompletionCallback
|
||||
}
|
||||
|
||||
// RequestCompletionCallback defines the type of the request callback function
|
||||
type RequestCompletionCallback func(*http.Request, *http.Response)
|
||||
|
||||
// ListOptions specifies the optional parameters to various List methods that
|
||||
// support pagination.
|
||||
type ListOptions struct {
|
||||
// For paginated result sets, page of results to retrieve.
|
||||
Page int `url:"page,omitempty"`
|
||||
|
||||
// For paginated result sets, the number of results to include per page.
|
||||
PerPage int `url:"per_page,omitempty"`
|
||||
}
|
||||
|
||||
// Response is a DigitalOcean response. This wraps the standard http.Response returned from DigitalOcean.
|
||||
type Response struct {
|
||||
*http.Response
|
||||
|
||||
// Links that were returned with the response. These are parsed from
|
||||
// request body and not the header.
|
||||
Links *Links
|
||||
|
||||
// Monitoring URI
|
||||
Monitor string
|
||||
|
||||
Rate
|
||||
}
|
||||
|
||||
// An ErrorResponse reports the error caused by an API request
|
||||
type ErrorResponse struct {
|
||||
// HTTP response that caused this error
|
||||
Response *http.Response
|
||||
|
||||
// Error message
|
||||
Message string
|
||||
}
|
||||
|
||||
// Rate contains the rate limit for the current client.
|
||||
type Rate struct {
|
||||
// The number of request per hour the client is currently limited to.
|
||||
Limit int `json:"limit"`
|
||||
|
||||
// The number of remaining requests the client can make this hour.
|
||||
Remaining int `json:"remaining"`
|
||||
|
||||
// The time at which the current rate limit will reset.
|
||||
Reset Timestamp `json:"reset"`
|
||||
}
|
||||
|
||||
func addOptions(s string, opt interface{}) (string, error) {
|
||||
v := reflect.ValueOf(opt)
|
||||
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
origURL, err := url.Parse(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
origValues := origURL.Query()
|
||||
|
||||
newValues, err := query.Values(opt)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
for k, v := range newValues {
|
||||
origValues[k] = v
|
||||
}
|
||||
|
||||
origURL.RawQuery = origValues.Encode()
|
||||
return origURL.String(), nil
|
||||
}
|
||||
|
||||
// NewClient returns a new DigitalOcean API client.
|
||||
func NewClient(httpClient *http.Client) *Client {
|
||||
if httpClient == nil {
|
||||
httpClient = http.DefaultClient
|
||||
}
|
||||
|
||||
baseURL, _ := url.Parse(defaultBaseURL)
|
||||
|
||||
c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}
|
||||
c.Account = &AccountServiceOp{client: c}
|
||||
c.Actions = &ActionsServiceOp{client: c}
|
||||
c.Domains = &DomainsServiceOp{client: c}
|
||||
c.Droplets = &DropletsServiceOp{client: c}
|
||||
c.DropletActions = &DropletActionsServiceOp{client: c}
|
||||
c.Images = &ImagesServiceOp{client: c}
|
||||
c.ImageActions = &ImageActionsServiceOp{client: c}
|
||||
c.Keys = &KeysServiceOp{client: c}
|
||||
c.Regions = &RegionsServiceOp{client: c}
|
||||
c.Sizes = &SizesServiceOp{client: c}
|
||||
c.FloatingIPs = &FloatingIPsServiceOp{client: c}
|
||||
c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the
|
||||
// BaseURL of the Client. Relative URLs should always be specified without a preceding slash. If specified, the
|
||||
// value pointed to by body is JSON encoded and included as the request body.
|
||||
func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
|
||||
rel, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u := c.BaseURL.ResolveReference(rel)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if body != nil {
|
||||
err := json.NewEncoder(buf).Encode(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, u.String(), buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Add("Content-Type", mediaType)
|
||||
req.Header.Add("Accept", mediaType)
|
||||
req.Header.Add("User-Agent", userAgent)
|
||||
return req, nil
|
||||
}

// OnRequestCompleted sets the DO API request completion callback
func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) {
	c.onRequestCompleted = rc
}

// newResponse creates a new Response for the provided http.Response
func newResponse(r *http.Response) *Response {
	response := Response{Response: r}
	response.populateRate()

	return &response
}

func (r *Response) links() (map[string]headerLink.Link, error) {
	if linkText, ok := r.Response.Header["Link"]; ok {
		links, err := headerLink.Parse(linkText[0])

		if err != nil {
			return nil, err
		}

		linkMap := map[string]headerLink.Link{}
		for _, link := range links {
			linkMap[link.Rel] = link
		}

		return linkMap, nil
	}

	return map[string]headerLink.Link{}, nil
}

// populateRate parses the rate related headers and populates the response Rate.
func (r *Response) populateRate() {
	if limit := r.Header.Get(headerRateLimit); limit != "" {
		r.Rate.Limit, _ = strconv.Atoi(limit)
	}
	if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
		r.Rate.Remaining, _ = strconv.Atoi(remaining)
	}
	if reset := r.Header.Get(headerRateReset); reset != "" {
		if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
			r.Rate.Reset = Timestamp{time.Unix(v, 0)}
		}
	}
}

// Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value
// pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface,
// the raw response will be written to v, without attempting to decode it.
func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	if c.onRequestCompleted != nil {
		c.onRequestCompleted(req, resp)
	}

	defer func() {
		if rerr := resp.Body.Close(); err == nil {
			err = rerr
		}
	}()

	response := newResponse(resp)
	c.Rate = response.Rate

	err = CheckResponse(resp)
	if err != nil {
		return response, err
	}

	if v != nil {
		if w, ok := v.(io.Writer); ok {
			_, err := io.Copy(w, resp.Body)
			if err != nil {
				return nil, err
			}
		} else {
			err := json.NewDecoder(resp.Body).Decode(v)
			if err != nil {
				return nil, err
			}
		}
	}

	return response, err
}
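
Editorial note: a short sketch of the request/response round trip that every *ServiceOp method in this package follows, not part of the diff. The v2/account endpoint and the anonymous root struct are illustrative only; imports assumed: "fmt", "log", "github.com/digitalocean/godo".

// Editorial sketch (not part of the diff).
func showRateLimit(client *godo.Client) {
	var root struct {
		Account struct {
			Email string `json:"email"`
		} `json:"account"`
	}

	req, err := client.NewRequest("GET", "v2/account", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req, &root) // response body is JSON-decoded into root
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(root.Account.Email, "rate remaining:", resp.Rate.Remaining)
}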
func (r *ErrorResponse) Error() string {
	return fmt.Sprintf("%v %v: %d %v",
		r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message)
}

// CheckResponse checks the API response for errors, and returns them if present. A response is considered an
// error if it has a status code outside the 200 range. API error responses are expected to have either no response
// body, or a JSON response body that maps to ErrorResponse. Any other response body will be silently ignored.
func CheckResponse(r *http.Response) error {
	if c := r.StatusCode; c >= 200 && c <= 299 {
		return nil
	}

	errorResponse := &ErrorResponse{Response: r}
	data, err := ioutil.ReadAll(r.Body)
	if err == nil && len(data) > 0 {
		err := json.Unmarshal(data, errorResponse)
		if err != nil {
			return err
		}
	}

	return errorResponse
}
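
Editorial note: since Do funnels non-2xx responses through CheckResponse, a caller can recover the HTTP status and API message by type-asserting the returned error. A hedged sketch, assuming the imports from the earlier sketches:

// Editorial sketch (not part of the diff).
func describeError(err error) string {
	if errResp, ok := err.(*godo.ErrorResponse); ok {
		return fmt.Sprintf("API error %d: %s",
			errResp.Response.StatusCode, errResp.Message)
	}
	return err.Error()
}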

func (r Rate) String() string {
	return Stringify(r)
}

// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
	p := new(string)
	*p = v
	return p
}

// Int is a helper routine that allocates a new int value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int {
	p := new(int)
	*p = v
	return p
}

// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}

// StreamToString converts a reader to a string
func StreamToString(stream io.Reader) string {
	buf := new(bytes.Buffer)
	_, _ = buf.ReadFrom(stream)
	return buf.String()
}

@ -0,0 +1,71 @@
package godo

import "fmt"

// ImageActionsService is an interface for interfacing with the image actions
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#image-actions
type ImageActionsService interface {
	Get(int, int) (*Action, *Response, error)
	Transfer(int, *ActionRequest) (*Action, *Response, error)
}

// ImageActionsServiceOp handles communication with the image action related methods of the
// DigitalOcean API.
type ImageActionsServiceOp struct {
	client *Client
}

var _ ImageActionsService = &ImageActionsServiceOp{}

// Transfer an image
func (i *ImageActionsServiceOp) Transfer(imageID int, transferRequest *ActionRequest) (*Action, *Response, error) {
	if imageID < 1 {
		return nil, nil, NewArgError("imageID", "cannot be less than 1")
	}

	if transferRequest == nil {
		return nil, nil, NewArgError("transferRequest", "cannot be nil")
	}

	path := fmt.Sprintf("v2/images/%d/actions", imageID)

	req, err := i.client.NewRequest("POST", path, transferRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(actionRoot)
	resp, err := i.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.Event, resp, err
}

// Get an action for a particular image by id.
func (i *ImageActionsServiceOp) Get(imageID, actionID int) (*Action, *Response, error) {
	if imageID < 1 {
		return nil, nil, NewArgError("imageID", "cannot be less than 1")
	}

	if actionID < 1 {
		return nil, nil, NewArgError("actionID", "cannot be less than 1")
	}

	path := fmt.Sprintf("v2/images/%d/actions/%d", imageID, actionID)

	req, err := i.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(actionRoot)
	resp, err := i.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.Event, resp, err
}
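
Editorial note: a hedged usage sketch for Transfer, not part of the diff. It assumes ActionRequest is godo's map-based request type (defined elsewhere in the package) and uses a placeholder image ID and region slug; the action.ID lookup in the comment is also an assumption about the Action struct.

// Editorial sketch (not part of the diff). Image ID and region are placeholders.
func transferImage(client *godo.Client) (*godo.Action, error) {
	req := &godo.ActionRequest{"type": "transfer", "region": "nyc3"} // assumed map type
	action, _, err := client.ImageActions.Transfer(12345, req)
	if err != nil {
		return nil, err
	}
	// Poll later with client.ImageActions.Get(12345, action.ID) if needed.
	return action, nil
}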

@ -0,0 +1,194 @@
package godo

import "fmt"

const imageBasePath = "v2/images"

// ImagesService is an interface for interfacing with the images
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#images
type ImagesService interface {
	List(*ListOptions) ([]Image, *Response, error)
	ListDistribution(opt *ListOptions) ([]Image, *Response, error)
	ListApplication(opt *ListOptions) ([]Image, *Response, error)
	ListUser(opt *ListOptions) ([]Image, *Response, error)
	GetByID(int) (*Image, *Response, error)
	GetBySlug(string) (*Image, *Response, error)
	Update(int, *ImageUpdateRequest) (*Image, *Response, error)
	Delete(int) (*Response, error)
}

// ImagesServiceOp handles communication with the image related methods of the
// DigitalOcean API.
type ImagesServiceOp struct {
	client *Client
}

var _ ImagesService = &ImagesServiceOp{}

// Image represents a DigitalOcean Image
type Image struct {
	ID           int      `json:"id,float64,omitempty"`
	Name         string   `json:"name,omitempty"`
	Type         string   `json:"type,omitempty"`
	Distribution string   `json:"distribution,omitempty"`
	Slug         string   `json:"slug,omitempty"`
	Public       bool     `json:"public,omitempty"`
	Regions      []string `json:"regions,omitempty"`
	MinDiskSize  int      `json:"min_disk_size,omitempty"`
	Created      string   `json:"created_at,omitempty"`
}

// ImageUpdateRequest represents a request to update an image.
type ImageUpdateRequest struct {
	Name string `json:"name"`
}

type imageRoot struct {
	Image Image
}

type imagesRoot struct {
	Images []Image
	Links  *Links `json:"links"`
}

type listImageOptions struct {
	Private bool   `url:"private,omitempty"`
	Type    string `url:"type,omitempty"`
}

func (i Image) String() string {
	return Stringify(i)
}

// List lists all the images available.
func (s *ImagesServiceOp) List(opt *ListOptions) ([]Image, *Response, error) {
	return s.list(opt, nil)
}

// ListDistribution lists all the distribution images.
func (s *ImagesServiceOp) ListDistribution(opt *ListOptions) ([]Image, *Response, error) {
	listOpt := listImageOptions{Type: "distribution"}
	return s.list(opt, &listOpt)
}

// ListApplication lists all the application images.
func (s *ImagesServiceOp) ListApplication(opt *ListOptions) ([]Image, *Response, error) {
	listOpt := listImageOptions{Type: "application"}
	return s.list(opt, &listOpt)
}

// ListUser lists all the user images.
func (s *ImagesServiceOp) ListUser(opt *ListOptions) ([]Image, *Response, error) {
	listOpt := listImageOptions{Private: true}
	return s.list(opt, &listOpt)
}

// GetByID retrieves an image by id.
func (s *ImagesServiceOp) GetByID(imageID int) (*Image, *Response, error) {
	if imageID < 1 {
		return nil, nil, NewArgError("imageID", "cannot be less than 1")
	}

	return s.get(interface{}(imageID))
}

// GetBySlug retrieves an image by slug.
func (s *ImagesServiceOp) GetBySlug(slug string) (*Image, *Response, error) {
	if len(slug) < 1 {
		return nil, nil, NewArgError("slug", "cannot be blank")
	}

	return s.get(interface{}(slug))
}

// Update an image name.
func (s *ImagesServiceOp) Update(imageID int, updateRequest *ImageUpdateRequest) (*Image, *Response, error) {
	if imageID < 1 {
		return nil, nil, NewArgError("imageID", "cannot be less than 1")
	}

	if updateRequest == nil {
		return nil, nil, NewArgError("updateRequest", "cannot be nil")
	}

	path := fmt.Sprintf("%s/%d", imageBasePath, imageID)
	req, err := s.client.NewRequest("PUT", path, updateRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(imageRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.Image, resp, err
}

// Delete an image.
func (s *ImagesServiceOp) Delete(imageID int) (*Response, error) {
	if imageID < 1 {
		return nil, NewArgError("imageID", "cannot be less than 1")
	}

	path := fmt.Sprintf("%s/%d", imageBasePath, imageID)

	req, err := s.client.NewRequest("DELETE", path, nil)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)

	return resp, err
}

// Helper method for getting an individual image
func (s *ImagesServiceOp) get(ID interface{}) (*Image, *Response, error) {
	path := fmt.Sprintf("%s/%v", imageBasePath, ID)

	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(imageRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.Image, resp, err
}

// Helper method for listing images
func (s *ImagesServiceOp) list(opt *ListOptions, listOpt *listImageOptions) ([]Image, *Response, error) {
	path := imageBasePath
	path, err := addOptions(path, opt)
	if err != nil {
		return nil, nil, err
	}
	path, err = addOptions(path, listOpt)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(imagesRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}

	return root.Images, resp, err
}
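
Editorial note: a minimal sketch of calling one of the image list variants, not part of the diff. It assumes godo.ListOptions exposes Page and PerPage fields, as it does upstream.

// Editorial sketch (not part of the diff).
func firstPageOfDistros(client *godo.Client) ([]godo.Image, error) {
	opt := &godo.ListOptions{Page: 1, PerPage: 50} // assumed ListOptions fields
	images, _, err := client.Images.ListDistribution(opt)
	return images, err
}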

@ -0,0 +1,222 @@
package godo

import "fmt"

const keysBasePath = "v2/account/keys"

// KeysService is an interface for interfacing with the keys
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#keys
type KeysService interface {
	List(*ListOptions) ([]Key, *Response, error)
	GetByID(int) (*Key, *Response, error)
	GetByFingerprint(string) (*Key, *Response, error)
	Create(*KeyCreateRequest) (*Key, *Response, error)
	UpdateByID(int, *KeyUpdateRequest) (*Key, *Response, error)
	UpdateByFingerprint(string, *KeyUpdateRequest) (*Key, *Response, error)
	DeleteByID(int) (*Response, error)
	DeleteByFingerprint(string) (*Response, error)
}

// KeysServiceOp handles communication with key related methods of the
// DigitalOcean API.
type KeysServiceOp struct {
	client *Client
}

var _ KeysService = &KeysServiceOp{}

// Key represents a DigitalOcean Key.
type Key struct {
	ID          int    `json:"id,float64,omitempty"`
	Name        string `json:"name,omitempty"`
	Fingerprint string `json:"fingerprint,omitempty"`
	PublicKey   string `json:"public_key,omitempty"`
}

// KeyUpdateRequest represents a request to update a DigitalOcean key.
type KeyUpdateRequest struct {
	Name string `json:"name"`
}

type keysRoot struct {
	SSHKeys []Key  `json:"ssh_keys"`
	Links   *Links `json:"links"`
}

type keyRoot struct {
	SSHKey Key `json:"ssh_key"`
}

func (s Key) String() string {
	return Stringify(s)
}

// KeyCreateRequest represents a request to create a new key.
type KeyCreateRequest struct {
	Name      string `json:"name"`
	PublicKey string `json:"public_key"`
}

// List all keys
func (s *KeysServiceOp) List(opt *ListOptions) ([]Key, *Response, error) {
	path := keysBasePath
	path, err := addOptions(path, opt)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(keysRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}

	return root.SSHKeys, resp, err
}

// Performs a get given a path
func (s *KeysServiceOp) get(path string) (*Key, *Response, error) {
	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(keyRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.SSHKey, resp, err
}

// GetByID gets a Key by id
func (s *KeysServiceOp) GetByID(keyID int) (*Key, *Response, error) {
	if keyID < 1 {
		return nil, nil, NewArgError("keyID", "cannot be less than 1")
	}

	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
	return s.get(path)
}

// GetByFingerprint gets a Key by fingerprint
func (s *KeysServiceOp) GetByFingerprint(fingerprint string) (*Key, *Response, error) {
	if len(fingerprint) < 1 {
		return nil, nil, NewArgError("fingerprint", "cannot be empty")
	}

	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
	return s.get(path)
}

// Create a key using a KeyCreateRequest
func (s *KeysServiceOp) Create(createRequest *KeyCreateRequest) (*Key, *Response, error) {
	if createRequest == nil {
		return nil, nil, NewArgError("createRequest", "cannot be nil")
	}

	req, err := s.client.NewRequest("POST", keysBasePath, createRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(keyRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.SSHKey, resp, err
}

// UpdateByID updates a key name by ID.
func (s *KeysServiceOp) UpdateByID(keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
	if keyID < 1 {
		return nil, nil, NewArgError("keyID", "cannot be less than 1")
	}

	if updateRequest == nil {
		return nil, nil, NewArgError("updateRequest", "cannot be nil")
	}

	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
	req, err := s.client.NewRequest("PUT", path, updateRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(keyRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.SSHKey, resp, err
}

// UpdateByFingerprint updates a key name by fingerprint.
func (s *KeysServiceOp) UpdateByFingerprint(fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
	if len(fingerprint) < 1 {
		return nil, nil, NewArgError("fingerprint", "cannot be empty")
	}

	if updateRequest == nil {
		return nil, nil, NewArgError("updateRequest", "cannot be nil")
	}

	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
	req, err := s.client.NewRequest("PUT", path, updateRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(keyRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.SSHKey, resp, err
}

// Delete key using a path
func (s *KeysServiceOp) delete(path string) (*Response, error) {
	req, err := s.client.NewRequest("DELETE", path, nil)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)

	return resp, err
}

// DeleteByID deletes a key by its id
func (s *KeysServiceOp) DeleteByID(keyID int) (*Response, error) {
	if keyID < 1 {
		return nil, NewArgError("keyID", "cannot be less than 1")
	}

	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
	return s.delete(path)
}

// DeleteByFingerprint deletes a key by its fingerprint
func (s *KeysServiceOp) DeleteByFingerprint(fingerprint string) (*Response, error) {
	if len(fingerprint) < 1 {
		return nil, NewArgError("fingerprint", "cannot be empty")
	}

	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
	return s.delete(path)
}
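
Editorial note: a hedged sketch of registering and later removing an SSH key through this service, not part of the diff. The key name and key material are placeholders.

// Editorial sketch (not part of the diff).
func addKey(client *godo.Client) (*godo.Key, error) {
	create := &godo.KeyCreateRequest{
		Name:      "packer-example",            // placeholder
		PublicKey: "ssh-rsa AAAA... user@host", // placeholder key material
	}
	key, _, err := client.Keys.Create(create)
	if err != nil {
		return nil, err
	}
	// When the key is no longer needed:
	// _, err = client.Keys.DeleteByFingerprint(key.Fingerprint)
	return key, nil
}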

@ -0,0 +1,86 @@
package godo

import (
	"net/url"
	"strconv"
)

// Links manages links that are returned along with a List
type Links struct {
	Pages   *Pages       `json:"pages,omitempty"`
	Actions []LinkAction `json:"actions,omitempty"`
}

// Pages are pages specified in Links
type Pages struct {
	First string `json:"first,omitempty"`
	Prev  string `json:"prev,omitempty"`
	Last  string `json:"last,omitempty"`
	Next  string `json:"next,omitempty"`
}

// LinkAction is a pointer to an action
type LinkAction struct {
	ID   int    `json:"id,omitempty"`
	Rel  string `json:"rel,omitempty"`
	HREF string `json:"href,omitempty"`
}

// CurrentPage is the current page of the list
func (l *Links) CurrentPage() (int, error) {
	return l.Pages.current()
}

func (p *Pages) current() (int, error) {
	switch {
	case p == nil:
		return 1, nil
	case p.Prev == "" && p.Next != "":
		return 1, nil
	case p.Prev != "":
		prevPage, err := pageForURL(p.Prev)
		if err != nil {
			return 0, err
		}

		return prevPage + 1, nil
	}

	return 0, nil
}

// IsLastPage returns true if the current page is the last
func (l *Links) IsLastPage() bool {
	if l.Pages == nil {
		return true
	}
	return l.Pages.isLast()
}

func (p *Pages) isLast() bool {
	if p.Last == "" {
		return true
	}

	return false
}

func pageForURL(urlText string) (int, error) {
	u, err := url.ParseRequestURI(urlText)
	if err != nil {
		return 0, err
	}

	pageStr := u.Query().Get("page")
	page, err := strconv.Atoi(pageStr)
	if err != nil {
		return 0, err
	}

	return page, nil
}

// Get a link action by id.
func (la *LinkAction) Get(client *Client) (*Action, *Response, error) {
	return client.Actions.Get(la.ID)
}
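
Editorial note: a sketch of walking every page of a list using the Links helpers above, not part of the diff. It reuses the Keys service from earlier in this diff and assumes godo.ListOptions has a Page field.

// Editorial sketch (not part of the diff).
func allKeys(client *godo.Client) ([]godo.Key, error) {
	opt := &godo.ListOptions{Page: 1} // assumed ListOptions field
	var all []godo.Key
	for {
		keys, resp, err := client.Keys.List(opt)
		if err != nil {
			return nil, err
		}
		all = append(all, keys...)

		if resp.Links == nil || resp.Links.IsLastPage() {
			return all, nil
		}
		page, err := resp.Links.CurrentPage()
		if err != nil {
			return nil, err
		}
		opt.Page = page + 1 // request the next page
	}
}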

@ -0,0 +1,63 @@
package godo

// RegionsService is an interface for interfacing with the regions
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#regions
type RegionsService interface {
	List(*ListOptions) ([]Region, *Response, error)
}

// RegionsServiceOp handles communication with the region related methods of the
// DigitalOcean API.
type RegionsServiceOp struct {
	client *Client
}

var _ RegionsService = &RegionsServiceOp{}

// Region represents a DigitalOcean Region
type Region struct {
	Slug      string   `json:"slug,omitempty"`
	Name      string   `json:"name,omitempty"`
	Sizes     []string `json:"sizes,omitempty"`
	Available bool     `json:"available,omitempty"`
	Features  []string `json:"features,omitempty"`
}

type regionsRoot struct {
	Regions []Region
	Links   *Links `json:"links"`
}

type regionRoot struct {
	Region Region
}

func (r Region) String() string {
	return Stringify(r)
}

// List all regions
func (s *RegionsServiceOp) List(opt *ListOptions) ([]Region, *Response, error) {
	path := "v2/regions"
	path, err := addOptions(path, opt)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(regionsRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}

	return root.Regions, resp, err
}

@ -0,0 +1,63 @@
package godo

// SizesService is an interface for interfacing with the size
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#sizes
type SizesService interface {
	List(*ListOptions) ([]Size, *Response, error)
}

// SizesServiceOp handles communication with the size related methods of the
// DigitalOcean API.
type SizesServiceOp struct {
	client *Client
}

var _ SizesService = &SizesServiceOp{}

// Size represents a DigitalOcean Size
type Size struct {
	Slug         string   `json:"slug,omitempty"`
	Memory       int      `json:"memory,omitempty"`
	Vcpus        int      `json:"vcpus,omitempty"`
	Disk         int      `json:"disk,omitempty"`
	PriceMonthly float64  `json:"price_monthly,omitempty"`
	PriceHourly  float64  `json:"price_hourly,omitempty"`
	Regions      []string `json:"regions,omitempty"`
	Available    bool     `json:"available,omitempty"`
	Transfer     float64  `json:"transfer,omitempty"`
}

func (s Size) String() string {
	return Stringify(s)
}

type sizesRoot struct {
	Sizes []Size
	Links *Links `json:"links"`
}

// List all sizes
func (s *SizesServiceOp) List(opt *ListOptions) ([]Size, *Response, error) {
	path := "v2/sizes"
	path, err := addOptions(path, opt)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(sizesRoot)
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}

	return root.Sizes, resp, err
}

@ -0,0 +1,83 @@
package godo

import (
	"bytes"
	"fmt"
	"reflect"
)

var timestampType = reflect.TypeOf(Timestamp{})

// Stringify attempts to create a string representation of DigitalOcean types
func Stringify(message interface{}) string {
	var buf bytes.Buffer
	v := reflect.ValueOf(message)
	stringifyValue(&buf, v)
	return buf.String()
}

// stringifyValue was graciously cargoculted from the goprotobuf library
func stringifyValue(w *bytes.Buffer, val reflect.Value) {
	if val.Kind() == reflect.Ptr && val.IsNil() {
		_, _ = w.Write([]byte("<nil>"))
		return
	}

	v := reflect.Indirect(val)

	switch v.Kind() {
	case reflect.String:
		fmt.Fprintf(w, `"%s"`, v)
	case reflect.Slice:
		_, _ = w.Write([]byte{'['})
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				_, _ = w.Write([]byte{' '})
			}

			stringifyValue(w, v.Index(i))
		}

		_, _ = w.Write([]byte{']'})
		return
	case reflect.Struct:
		if v.Type().Name() != "" {
			_, _ = w.Write([]byte(v.Type().String()))
		}

		// special handling of Timestamp values
		if v.Type() == timestampType {
			fmt.Fprintf(w, "{%s}", v.Interface())
			return
		}

		_, _ = w.Write([]byte{'{'})

		var sep bool
		for i := 0; i < v.NumField(); i++ {
			fv := v.Field(i)
			if fv.Kind() == reflect.Ptr && fv.IsNil() {
				continue
			}
			if fv.Kind() == reflect.Slice && fv.IsNil() {
				continue
			}

			if sep {
				_, _ = w.Write([]byte(", "))
			} else {
				sep = true
			}

			_, _ = w.Write([]byte(v.Type().Field(i).Name))
			_, _ = w.Write([]byte{':'})
			stringifyValue(w, fv)
		}

		_, _ = w.Write([]byte{'}'})
	default:
		if v.CanInterface() {
			fmt.Fprint(w, v.Interface())
		}
	}
}

@ -0,0 +1,35 @@
package godo

import (
	"strconv"
	"time"
)

// Timestamp represents a time that can be unmarshalled from a JSON string
// formatted as either an RFC3339 or Unix timestamp. All
// exported methods of time.Time can be called on Timestamp.
type Timestamp struct {
	time.Time
}

func (t Timestamp) String() string {
	return t.Time.String()
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Time is expected in RFC3339 or Unix format.
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	str := string(data)
	i, err := strconv.ParseInt(str, 10, 64)
	if err == nil {
		t.Time = time.Unix(i, 0)
	} else {
		t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str)
	}
	return err
}

// Equal reports whether t and u are equal based on time.Equal
func (t Timestamp) Equal(u Timestamp) bool {
	return t.Time.Equal(u.Time)
}
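
Editorial note: a small sketch showing the two encodings accepted by Timestamp.UnmarshalJSON, not part of the diff; imports assumed: "encoding/json", "fmt", "github.com/digitalocean/godo".

// Editorial sketch (not part of the diff).
func parseTimestamps() error {
	var a, b godo.Timestamp
	if err := json.Unmarshal([]byte(`1458081035`), &a); err != nil { // Unix seconds
		return err
	}
	if err := json.Unmarshal([]byte(`"2016-03-15T22:30:35Z"`), &b); err != nil { // RFC3339
		return err
	}
	fmt.Println(a.Equal(b)) // prints true when both literals encode the same instant
	return nil
}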

@ -0,0 +1,47 @@
package util

import (
	"fmt"
	"time"

	"github.com/digitalocean/godo"
)

const (
	// activeFailure is the number of times we can fail before deciding
	// the check for active is a total failure. This can help account
	// for servers randomly not answering.
	activeFailure = 3
)

// WaitForActive waits for a droplet to become active
func WaitForActive(client *godo.Client, monitorURI string) error {
	if len(monitorURI) == 0 {
		return fmt.Errorf("create had no monitor uri")
	}

	completed := false
	failCount := 0
	for !completed {
		action, _, err := client.DropletActions.GetByURI(monitorURI)

		if err != nil {
			if failCount <= activeFailure {
				failCount++
				continue
			}
			return err
		}

		switch action.Status {
		case godo.ActionInProgress:
			time.Sleep(5 * time.Second)
		case godo.ActionCompleted:
			completed = true
		default:
			return fmt.Errorf("unknown status: [%s]", action.Status)
		}
	}

	return nil
}
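
Editorial note: a hedged sketch of how a caller (such as Packer's DigitalOcean builder) might feed WaitForActive, not part of the diff. The DropletCreateRequest field names and the use of Links.Actions[0].HREF as the monitor URI are assumptions based on the upstream godo API of this era; the droplet parameters are placeholders.

// Editorial sketch (not part of the diff).
func createAndWait(client *godo.Client) error {
	req := &godo.DropletCreateRequest{ // assumed request shape
		Name:   "example-droplet",
		Region: "nyc3",
		Size:   "512mb",
		Image:  godo.DropletCreateImage{Slug: "ubuntu-14-04-x64"},
	}
	_, resp, err := client.Droplets.Create(req)
	if err != nil {
		return err
	}
	if resp.Links == nil || len(resp.Links.Actions) == 0 {
		return fmt.Errorf("create returned no action link to monitor")
	}
	// Block until the droplet's create action reports ActionCompleted.
	return util.WaitForActive(client, resp.Links.Actions[0].HREF)
}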
Some files were not shown because too many files have changed in this diff.