diff --git a/README.md b/README.md index 5249f515e..1c30fd877 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ comes out of the box with support for the following platforms: * Parallels * ProfitBricks * QEMU. Both KVM and Xen images. +* Scaleway * Triton (Joyent Public Cloud) * VMware * VirtualBox diff --git a/builder/scaleway/artifact.go b/builder/scaleway/artifact.go new file mode 100644 index 000000000..b5aea88da --- /dev/null +++ b/builder/scaleway/artifact.go @@ -0,0 +1,62 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type Artifact struct { + // The name of the image + imageName string + + // The ID of the image + imageID string + + // The name of the snapshot + snapshotName string + + // The ID of the snapshot + snapshotID string + + // The name of the region + regionName string + + // The client for making API calls + client *api.ScalewayAPI +} + +func (*Artifact) BuilderId() string { + return BuilderId +} + +func (*Artifact) Files() []string { + // No files with Scaleway + return nil +} + +func (a *Artifact) Id() string { + return fmt.Sprintf("%s:%s", a.regionName, a.imageID) +} + +func (a *Artifact) String() string { + return fmt.Sprintf("An image was created: '%v' (ID: %v) in region '%v' based on snapshot '%v' (ID: %v)", + a.imageName, a.imageID, a.regionName, a.snapshotName, a.snapshotID) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + log.Printf("Destroying image: %s (%s)", a.imageID, a.imageName) + if err := a.client.DeleteImage(a.imageID); err != nil { + return err + } + log.Printf("Destroying snapshot: %s (%s)", a.snapshotID, a.snapshotName) + if err := a.client.DeleteSnapshot(a.snapshotID); err != nil { + return err + } + return nil +} diff --git a/builder/scaleway/artifact_test.go b/builder/scaleway/artifact_test.go new file mode 100644 index 000000000..8f158ef5f --- /dev/null +++ b/builder/scaleway/artifact_test.go @@ -0,0 +1,33 @@ +package scaleway + +import ( + "testing" + + "github.com/hashicorp/packer/packer" +) + +func TestArtifact_Impl(t *testing.T) { + var raw interface{} + raw = &Artifact{} + if _, ok := raw.(packer.Artifact); !ok { + t.Fatalf("Artifact should be artifact") + } +} + +func TestArtifactId(t *testing.T) { + a := &Artifact{"packer-foobar-image", "cc586e45-5156-4f71-b223-cf406b10dd1d", "packer-foobar-snapshot", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "ams1:cc586e45-5156-4f71-b223-cf406b10dd1d" + + if a.Id() != expected { + t.Fatalf("artifact ID should match: %v", expected) + } +} + +func TestArtifactString(t *testing.T) { + a := &Artifact{"packer-foobar-image", "cc586e45-5156-4f71-b223-cf406b10dd1d", "packer-foobar-snapshot", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "An image was created: 'packer-foobar-image' (ID: cc586e45-5156-4f71-b223-cf406b10dd1d) in region 'ams1' based on snapshot 'packer-foobar-snapshot' (ID: cc586e45-5156-4f71-b223-cf406b10dd1c)" + + if a.String() != expected { + t.Fatalf("artifact string should match: %v", expected) + } +} diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go new file mode 100644 index 000000000..457002c74 --- /dev/null +++ b/builder/scaleway/builder.go @@ -0,0 +1,106 @@ +// The scaleway package contains a packer.Builder implementation +// that builds Scaleway images (snapshots). 
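+//
+// The builder needs a Scaleway organization ID and token, supplied either as
+// api_access_key/api_token in the template or through the SCALEWAY_API_ACCESS_KEY
+// and SCALEWAY_API_TOKEN environment variables, together with a region, a
+// commercial_type and a base image to boot from. A minimal template using this
+// builder might look like the following (all values below are placeholders):
+//
+//   {
+//     "builders": [{
+//       "type": "scaleway",
+//       "api_access_key": "YOUR_ACCESS_KEY",
+//       "api_token": "YOUR_TOKEN",
+//       "region": "ams1",
+//       "commercial_type": "VC1S",
+//       "image": "image-uuid",
+//       "ssh_username": "root"
+//     }]
+//   }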
+ +package scaleway + +import ( + "errors" + "fmt" + "log" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +// The unique id for the builder +const BuilderId = "hashicorp.scaleway" + +type Builder struct { + config Config + runner multistep.Runner +} + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + c, warnings, errs := NewConfig(raws...) + if errs != nil { + return warnings, errs + } + b.config = *c + + return nil, nil +} + +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + client, err := api.NewScalewayAPI(b.config.Organization, b.config.Token, b.config.UserAgent, b.config.Region) + + if err != nil { + ui.Error(err.Error()) + return nil, err + } + + state := new(multistep.BasicStateBag) + state.Put("config", b.config) + state.Put("client", client) + state.Put("hook", hook) + state.Put("ui", ui) + + steps := []multistep.Step{ + &stepCreateSSHKey{ + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("scw_%s.pem", b.config.PackerBuildName), + PrivateKeyFile: b.config.Comm.SSHPrivateKey, + }, + new(stepCreateServer), + new(stepServerInfo), + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig, + }, + new(common.StepProvision), + new(stepShutdown), + new(stepSnapshot), + new(stepImage), + } + + b.runner = common.NewRunnerWithPauseFn(steps, b.config.PackerConfig, ui, state) + b.runner.Run(state) + + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + // If we were interrupted or cancelled, then just exit. + if _, ok := state.GetOk(multistep.StateCancelled); ok { + return nil, errors.New("Build was cancelled.") + } + + if _, ok := state.GetOk(multistep.StateHalted); ok { + return nil, errors.New("Build was halted.") + } + + if _, ok := state.GetOk("snapshot_name"); !ok { + return nil, errors.New("Cannot find snapshot_name in state.") + } + + artifact := &Artifact{ + imageName: state.Get("image_name").(string), + imageID: state.Get("image_id").(string), + snapshotName: state.Get("snapshot_name").(string), + snapshotID: state.Get("snapshot_id").(string), + regionName: state.Get("region").(string), + client: client, + } + + return artifact, nil +} + +func (b *Builder) Cancel() { + if b.runner != nil { + log.Println("Cancelling the step runner...") + b.runner.Cancel() + } +} diff --git a/builder/scaleway/builder_test.go b/builder/scaleway/builder_test.go new file mode 100644 index 000000000..230c9c0c6 --- /dev/null +++ b/builder/scaleway/builder_test.go @@ -0,0 +1,237 @@ +package scaleway + +import ( + "strconv" + "testing" + + "github.com/hashicorp/packer/packer" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "api_access_key": "foo", + "api_token": "bar", + "region": "ams1", + "commercial_type": "VC1S", + "ssh_username": "root", + "image": "image-uuid", + } +} + +func TestBuilder_ImplementsBuilder(t *testing.T) { + var raw interface{} + raw = &Builder{} + if _, ok := raw.(packer.Builder); !ok { + t.Fatalf("Builder should be a builder") + } +} + +func TestBuilder_Prepare_BadType(t *testing.T) { + b := &Builder{} + c := map[string]interface{}{ + "api_token": []string{}, + } + + warnings, err := b.Prepare(c) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("prepare should fail") + } +} + +func 
TestBuilderPrepare_InvalidKey(t *testing.T) { + var b Builder + config := testConfig() + + config["i_should_not_be_valid"] = true + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should have error") + } +} + +func TestBuilderPrepare_Region(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "region") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("should error") + } + + expected := "ams1" + + config["region"] = expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.Region != expected { + t.Errorf("found %s, expected %s", b.config.Region, expected) + } +} + +func TestBuilderPrepare_CommercialType(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "commercial_type") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("should error") + } + + expected := "VC1S" + + config["commercial_type"] = expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.CommercialType != expected { + t.Errorf("found %s, expected %s", b.config.CommercialType, expected) + } +} + +func TestBuilderPrepare_Image(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "image") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should error") + } + + expected := "cc586e45-5156-4f71-b223-cf406b10dd1c" + + config["image"] = expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.Image != expected { + t.Errorf("found %s, expected %s", b.config.Image, expected) + } +} + +func TestBuilderPrepare_SnapshotName(t *testing.T) { + var b Builder + config := testConfig() + + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.SnapshotName == "" { + t.Errorf("invalid: %s", b.config.SnapshotName) + } + + config["snapshot_name"] = "foobarbaz" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["snapshot_name"] = "{{timestamp}}" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + _, err = strconv.ParseInt(b.config.SnapshotName, 0, 0) + if err != nil { + t.Fatalf("failed to parse int in template: %s", err) + } + +} + +func TestBuilderPrepare_ServerName(t *testing.T) { + var b Builder + config := testConfig() + + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.ServerName == "" { + t.Errorf("invalid: %s", b.config.ServerName) + } + + config["server_name"] = "foobar" + b = Builder{} + warnings, err = 
b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["server_name"] = "foobar-{{timestamp}}" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["server_name"] = "foobar-{{" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should have error") + } + +} diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go new file mode 100644 index 000000000..f965e1ce0 --- /dev/null +++ b/builder/scaleway/config.go @@ -0,0 +1,122 @@ +package scaleway + +import ( + "errors" + "fmt" + "os" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/common/uuid" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" + "github.com/mitchellh/mapstructure" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` + + Token string `mapstructure:"api_token"` + Organization string `mapstructure:"api_access_key"` + + Region string `mapstructure:"region"` + Image string `mapstructure:"image"` + CommercialType string `mapstructure:"commercial_type"` + + SnapshotName string `mapstructure:"snapshot_name"` + ImageName string `mapstructure:"image_name"` + ServerName string `mapstructure:"server_name"` + + UserAgent string + ctx interpolate.Context +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + + var md mapstructure.Metadata + err := config.Decode(c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) + if err != nil { + return nil, nil, err + } + + c.UserAgent = "Packer - Scaleway builder" + + if c.Organization == "" { + c.Organization = os.Getenv("SCALEWAY_API_ACCESS_KEY") + } + + if c.Token == "" { + c.Token = os.Getenv("SCALEWAY_API_TOKEN") + } + + if c.SnapshotName == "" { + def, err := interpolate.Render("snapshot-packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + + c.SnapshotName = def + } + + if c.ImageName == "" { + def, err := interpolate.Render("image-packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + + c.ImageName = def + } + + if c.ServerName == "" { + // Default to packer-[time-ordered-uuid] + c.ServerName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) + } + + var errs *packer.MultiError + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) 
+ } + if c.Organization == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("Scaleway Organization ID must be specified")) + } + + if c.Token == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("Scaleway Token must be specified")) + } + + if c.Region == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("region is required")) + } + + if c.CommercialType == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("commercial type is required")) + } + + if c.Image == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("image is required")) + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, nil, errs + } + + common.ScrubConfig(c, c.Token) + return c, nil, nil +} diff --git a/builder/scaleway/ssh.go b/builder/scaleway/ssh.go new file mode 100644 index 000000000..a2c9b8f16 --- /dev/null +++ b/builder/scaleway/ssh.go @@ -0,0 +1,64 @@ +package scaleway + +import ( + "fmt" + "net" + "os" + + packerssh "github.com/hashicorp/packer/communicator/ssh" + "github.com/hashicorp/packer/helper/multistep" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +func commHost(state multistep.StateBag) (string, error) { + ipAddress := state.Get("server_ip").(string) + return ipAddress, nil +} + +func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { + config := state.Get("config").(Config) + var privateKey string + + var auth []ssh.AuthMethod + + if config.Comm.SSHAgentAuth { + authSock := os.Getenv("SSH_AUTH_SOCK") + if authSock == "" { + return nil, fmt.Errorf("SSH_AUTH_SOCK is not set") + } + + sshAgent, err := net.Dial("unix", authSock) + if err != nil { + return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err) + } + auth = []ssh.AuthMethod{ + ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers), + } + } + + if config.Comm.SSHPassword != "" { + auth = append(auth, + ssh.Password(config.Comm.SSHPassword), + ssh.KeyboardInteractive( + packerssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), + ) + } + + if config.Comm.SSHPrivateKey != "" { + if priv, ok := state.GetOk("privateKey"); ok { + privateKey = priv.(string) + } + signer, err := ssh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + auth = append(auth, ssh.PublicKeys(signer)) + } + + return &ssh.ClientConfig{ + User: config.Comm.SSHUsername, + Auth: auth, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }, nil +} diff --git a/builder/scaleway/step_create_image.go b/builder/scaleway/step_create_image.go new file mode 100644 index 000000000..16345e74d --- /dev/null +++ b/builder/scaleway/step_create_image.go @@ -0,0 +1,54 @@ +package scaleway + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepImage struct{} + +func (s *stepImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + snapshotID := state.Get("snapshot_id").(string) + bootscriptID := "" + + ui.Say(fmt.Sprintf("Creating image: %v", c.ImageName)) + + image, err := client.GetImage(c.Image) + if err != nil { + err := fmt.Errorf("Error getting initial image info: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if image.DefaultBootscript != nil { + bootscriptID = 
image.DefaultBootscript.Identifier + } + + imageID, err := client.PostImage(snapshotID, c.ImageName, bootscriptID, image.Arch) + if err != nil { + err := fmt.Errorf("Error creating image: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Image ID: %s", imageID) + state.Put("image_id", imageID) + state.Put("image_name", c.ImageName) + state.Put("region", c.Region) + + return multistep.ActionContinue +} + +func (s *stepImage) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go new file mode 100644 index 000000000..57511d149 --- /dev/null +++ b/builder/scaleway/step_create_server.go @@ -0,0 +1,78 @@ +package scaleway + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepCreateServer struct { + serverID string +} + +func (s *stepCreateServer) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + sshPubKey := state.Get("ssh_pubkey").(string) + tags := []string{} + + ui.Say("Creating server...") + + if sshPubKey != "" { + tags = []string{fmt.Sprintf("AUTHORIZED_KEY=%s", strings.TrimSpace(sshPubKey))} + } + + server, err := client.PostServer(api.ScalewayServerDefinition{ + Name: c.ServerName, + Image: &c.Image, + Organization: c.Organization, + CommercialType: c.CommercialType, + Tags: tags, + }) + + if err != nil { + err := fmt.Errorf("Error creating server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + err = client.PostServerAction(server, "poweron") + + if err != nil { + err := fmt.Errorf("Error starting server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + s.serverID = server + + state.Put("server_id", server) + + return multistep.ActionContinue +} + +func (s *stepCreateServer) Cleanup(state multistep.StateBag) { + if s.serverID == "" { + return + } + + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + + ui.Say("Destroying server...") + + err := client.DeleteServerForce(s.serverID) + + if err != nil { + ui.Error(fmt.Sprintf( + "Error destroying server. 
Please destroy it manually: %s", err)) + } + +} diff --git a/builder/scaleway/step_create_ssh_key.go b/builder/scaleway/step_create_ssh_key.go new file mode 100644 index 000000000..c5405ccd6 --- /dev/null +++ b/builder/scaleway/step_create_ssh_key.go @@ -0,0 +1,106 @@ +package scaleway + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "log" + "os" + "runtime" + "strings" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "golang.org/x/crypto/ssh" +) + +type stepCreateSSHKey struct { + Debug bool + DebugKeyPath string + PrivateKeyFile string +} + +func (s *stepCreateSSHKey) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + + if s.PrivateKeyFile != "" { + ui.Say("Using existing SSH private key") + privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) + if err != nil { + state.Put("error", fmt.Errorf( + "Error loading configured private key file: %s", err)) + return multistep.ActionHalt + } + + state.Put("privateKey", string(privateKeyBytes)) + state.Put("ssh_pubkey", "") + + return multistep.ActionContinue + } + + ui.Say("Creating temporary ssh key for server...") + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + err := fmt.Errorf("Error creating temporary SSH key: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // ASN.1 DER encoded form + priv_der := x509.MarshalPKCS1PrivateKey(priv) + priv_blk := pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: priv_der, + } + + // Set the private key in the statebag for later + state.Put("privateKey", string(pem.EncodeToMemory(&priv_blk))) + + pub, _ := ssh.NewPublicKey(&priv.PublicKey) + pub_sshformat := string(ssh.MarshalAuthorizedKey(pub)) + pub_sshformat = strings.Replace(pub_sshformat, " ", "_", -1) + + log.Printf("temporary ssh key created") + + // Remember some state for the future + state.Put("ssh_pubkey", string(pub_sshformat)) + + // If we're in debug mode, output the private key to the working directory. + if s.Debug { + ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) + f, err := os.Create(s.DebugKeyPath) + if err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + defer f.Close() + + // Write the key out + if _, err := f.Write(pem.EncodeToMemory(&priv_blk)); err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + + // Chmod it so that it is SSH ready + if runtime.GOOS != "windows" { + if err := f.Chmod(0600); err != nil { + state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err)) + return multistep.ActionHalt + } + } + } + + return multistep.ActionContinue +} + +func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) { + // SSH key is passed via tag. Nothing to do here.
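+ // The key pair never leaves the builder: the public key is only handed to the
+ // server through its AUTHORIZED_KEY tag in stepCreateServer (with spaces in
+ // the key replaced by underscores), so there is no remote key object to delete.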
+ return +} diff --git a/builder/scaleway/step_server_info.go b/builder/scaleway/step_server_info.go new file mode 100644 index 000000000..7ab14658f --- /dev/null +++ b/builder/scaleway/step_server_info.go @@ -0,0 +1,45 @@ +package scaleway + +import ( + "context" + "fmt" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepServerInfo struct{} + +func (s *stepServerInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + serverID := state.Get("server_id").(string) + + ui.Say("Waiting for server to become active...") + + _, err := api.WaitForServerState(client, serverID, "running") + if err != nil { + err := fmt.Errorf("Error waiting for server to become booted: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + server, err := client.GetServer(serverID) + if err != nil { + err := fmt.Errorf("Error retrieving server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + state.Put("server_ip", server.PublicAddress.IP) + state.Put("root_volume_id", server.Volumes["0"].Identifier) + + return multistep.ActionContinue +} + +func (s *stepServerInfo) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_shutdown.go b/builder/scaleway/step_shutdown.go new file mode 100644 index 000000000..a8a029259 --- /dev/null +++ b/builder/scaleway/step_shutdown.go @@ -0,0 +1,44 @@ +package scaleway + +import ( + "context" + "fmt" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepShutdown struct{} + +func (s *stepShutdown) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + serverID := state.Get("server_id").(string) + + ui.Say("Shutting down server...") + + err := client.PostServerAction(serverID, "poweroff") + + if err != nil { + err := fmt.Errorf("Error stopping server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + _, err = api.WaitForServerState(client, serverID, "stopped") + + if err != nil { + err := fmt.Errorf("Error shutting down server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *stepShutdown) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_snapshot.go b/builder/scaleway/step_snapshot.go new file mode 100644 index 000000000..fd0dcb593 --- /dev/null +++ b/builder/scaleway/step_snapshot.go @@ -0,0 +1,40 @@ +package scaleway + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepSnapshot struct{} + +func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + volumeID := state.Get("root_volume_id").(string) + + ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName)) + snapshot, err := client.PostSnapshot(volumeID, c.SnapshotName) + if err != nil { + err := fmt.Errorf("Error creating 
snapshot: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Snapshot ID: %s", snapshot) + state.Put("snapshot_id", snapshot) + state.Put("snapshot_name", c.SnapshotName) + state.Put("region", c.Region) + + return multistep.ActionContinue +} + +func (s *stepSnapshot) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/command/plugin.go b/command/plugin.go index 55cfacc60..60651332d 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -39,6 +39,7 @@ import ( parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" qemubuilder "github.com/hashicorp/packer/builder/qemu" + scalewaybuilder "github.com/hashicorp/packer/builder/scaleway" tritonbuilder "github.com/hashicorp/packer/builder/triton" virtualboxisobuilder "github.com/hashicorp/packer/builder/virtualbox/iso" virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf" @@ -108,6 +109,7 @@ var Builders = map[string]packer.Builder{ "parallels-pvm": new(parallelspvmbuilder.Builder), "profitbricks": new(profitbricksbuilder.Builder), "qemu": new(qemubuilder.Builder), + "scaleway": new(scalewaybuilder.Builder), "triton": new(tritonbuilder.Builder), "virtualbox-iso": new(virtualboxisobuilder.Builder), "virtualbox-ovf": new(virtualboxovfbuilder.Builder), diff --git a/post-processor/vagrant-cloud/post-processor.go b/post-processor/vagrant-cloud/post-processor.go index f6c7f19d6..273d2fdb8 100644 --- a/post-processor/vagrant-cloud/post-processor.go +++ b/post-processor/vagrant-cloud/post-processor.go @@ -189,6 +189,8 @@ func providerFromBuilderName(name string) string { switch name { case "aws": return "aws" + case "scaleway": + return "scaleway" case "digitalocean": return "digitalocean" case "virtualbox": diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 310270809..f3bb6e346 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -26,6 +26,7 @@ var builtins = map[string]string{ "mitchellh.vmware-esx": "vmware", "pearkes.digitalocean": "digitalocean", "packer.googlecompute": "google", + "hashicorp.scaleway": "scaleway", "packer.parallels": "parallels", "MSOpenTech.hyperv": "hyperv", "transcend.qemu": "libvirt", @@ -221,6 +222,8 @@ func providerForName(name string) Provider { switch name { case "aws": return new(AWSProvider) + case "scaleway": + return new(ScalewayProvider) case "digitalocean": return new(DigitalOceanProvider) case "virtualbox": diff --git a/post-processor/vagrant/scaleway.go b/post-processor/vagrant/scaleway.go new file mode 100644 index 000000000..879f2b59e --- /dev/null +++ b/post-processor/vagrant/scaleway.go @@ -0,0 +1,52 @@ +package vagrant + +import ( + "bytes" + "fmt" + "github.com/hashicorp/packer/packer" + "strings" + "text/template" +) + +type scalewayVagrantfileTemplate struct { + Image string "" + Region string "" +} + +type ScalewayProvider struct{} + +func (p *ScalewayProvider) KeepInputArtifact() bool { + return true +} + +func (p *ScalewayProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { + // Create the metadata + metadata = map[string]interface{}{"provider": "scaleway"} + + // Determine the image and region... 
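+ // The Scaleway builder's artifact ID has the form "region:image-id" (see
+ // Artifact.Id in builder/scaleway/artifact.go), hence the split on ":" below.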
+ tplData := &scalewayVagrantfileTemplate{} + + parts := strings.Split(artifact.Id(), ":") + if len(parts) != 2 { + err = fmt.Errorf("Poorly formatted artifact ID: %s", artifact.Id()) + return + } + tplData.Region = parts[0] + tplData.Image = parts[1] + + // Build up the Vagrantfile + var contents bytes.Buffer + t := template.Must(template.New("vf").Parse(defaultScalewayVagrantfile)) + err = t.Execute(&contents, tplData) + vagrantfile = contents.String() + return +} + +var defaultScalewayVagrantfile = ` +Vagrant.configure("2") do |config| + config.vm.provider :scaleway do |scaleway| + scaleway.image = "{{ .Image }}" + scaleway.region = "{{ .Region }}" + end +end +` diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..747e4d89a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,94 @@ +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this 
software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..c32287611 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,479 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) + +**Seeing weird case-sensitive problems?** See [this +issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021). +This change has been reverted. I apologize for causing this. I greatly +underestimated the impact this would have. Logrus strives for stability and +backwards compatibility and failed to provide that. + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. 
The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. 
| +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). 
Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +logger, hook := NewNullLogger() +logger.Error("Hello error") + +assert.Equal(1, len(hook.Entries)) +assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) +assert.Equal("Hello error", hook.LastEntry().Message) + +hook.Reset() +assert.Nil(hook.LastEntry()) +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. 
Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8af90637a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 000000000..dddd5f877 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..4edbe7a2d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,275 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..9a0120ac1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. 
+func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..b5fbe934d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..266554e9f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyLevel: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..b769f3d35 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,308 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. 
This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. 
+func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..e59669111 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..5f6be4d3c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..308160ca8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..190297abf --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,28 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal(f io.Writer) bool { + var termios Termios + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 000000000..3c86b1abe --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..05d2f91f1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,33 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..ba8885406 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,189 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // QuoteCharacter can be set to the override the default quoting character " + // with something else. For example: ', or `. 
+ QuoteCharacter string + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !f.needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) + } + case error: + errmsg := value.Error() + if !f.needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..7bdebedc6 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) 
Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/creack/goselect/Dockerfile b/vendor/github.com/creack/goselect/Dockerfile new file mode 100644 index 000000000..d03b5a9d9 --- /dev/null +++ b/vendor/github.com/creack/goselect/Dockerfile @@ -0,0 +1,5 @@ +FROM google/golang:stable +MAINTAINER Guillaume J. Charmes +CMD /tmp/a.out +ADD . /src +RUN cd /src && go build -o /tmp/a.out diff --git a/vendor/github.com/creack/goselect/LICENSE b/vendor/github.com/creack/goselect/LICENSE new file mode 100644 index 000000000..95c600af1 --- /dev/null +++ b/vendor/github.com/creack/goselect/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Guillaume J. Charmes + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/creack/goselect/README.md b/vendor/github.com/creack/goselect/README.md new file mode 100644 index 000000000..d5d06510e --- /dev/null +++ b/vendor/github.com/creack/goselect/README.md @@ -0,0 +1,30 @@ +# go-select + +select(2) implementation in Go + +## Supported platforms + +| | 386 | amd64 | arm | arm64 | +|---------------|-----|-------|-----|-------| +| **linux** | yes | yes | yes | yes | +| **darwin** | yes | yes | n/a | ?? | +| **freebsd** | yes | yes | yes | ?? | +| **openbsd** | yes | yes | yes | ?? | +| **netbsd** | yes | yes | yes | ?? 
| +| **dragonfly** | n/a | yes | n/a | ?? | +| **solaris** | n/a | no | n/a | ?? | +| **plan9** | no | no | n/a | ?? | +| **windows** | yes | yes | n/a | ?? | +| **android** | n/a | n/a | no | ?? | + +*n/a: platform not supported by Go + +Go on `plan9` and `solaris` do not implement `syscall.Select` not `syscall.SYS_SELECT`. + +## Cross compile + +Using davecheney's https://github.com/davecheney/golang-crosscompile + +``` +export PLATFORMS="darwin/386 darwin/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm windows/386 windows/amd64 openbsd/386 openbsd/amd64 netbsd/386 netbsd/amd64 dragonfly/amd64 plan9/386 plan9/amd64 solaris/amd64" +``` diff --git a/vendor/github.com/creack/goselect/fdset.go b/vendor/github.com/creack/goselect/fdset.go new file mode 100644 index 000000000..2ff3f6c7a --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset.go @@ -0,0 +1,33 @@ +// +build !freebsd,!windows,!plan9 + +package goselect + +import "syscall" + +const FD_SETSIZE = syscall.FD_SETSIZE + +// FDSet wraps syscall.FdSet with convenience methods +type FDSet syscall.FdSet + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + fds.Bits[fd/NFDBITS] |= (1 << (fd % NFDBITS)) +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + fds.Bits[fd/NFDBITS] &^= (1 << (fd % NFDBITS)) +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + return fds.Bits[fd/NFDBITS]&(1<<(fd%NFDBITS)) != 0 +} + +// Keep a null set to avoid reinstatiation +var nullFdSet = &FDSet{} + +// Zero empties the Set +func (fds *FDSet) Zero() { + copy(fds.Bits[:], (nullFdSet).Bits[:]) +} diff --git a/vendor/github.com/creack/goselect/fdset_32.go b/vendor/github.com/creack/goselect/fdset_32.go new file mode 100644 index 000000000..8b90d21a9 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_32.go @@ -0,0 +1,10 @@ +// +build darwin openbsd netbsd 386 arm + +package goselect + +// darwin, netbsd and openbsd uses uint32 on both amd64 and 386 + +const ( + // NFDBITS is the amount of bits per mask + NFDBITS = 4 * 8 +) diff --git a/vendor/github.com/creack/goselect/fdset_64.go b/vendor/github.com/creack/goselect/fdset_64.go new file mode 100644 index 000000000..4e032e4ea --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_64.go @@ -0,0 +1,11 @@ +// +build !darwin,!netbsd,!openbsd +// +build amd64 arm64 + +package goselect + +// darwin, netbsd and openbsd uses uint32 on both amd64 and 386 + +const ( + // NFDBITS is the amount of bits per mask + NFDBITS = 8 * 8 +) diff --git a/vendor/github.com/creack/goselect/fdset_doc.go b/vendor/github.com/creack/goselect/fdset_doc.go new file mode 100644 index 000000000..d9f15a1ce --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_doc.go @@ -0,0 +1,93 @@ +package goselect + +/** +From: XCode's MacOSX10.10.sdk/System/Library/Frameworks/Kernel.framework/Versions/A/Headers/sys/select.h +-- +// darwin/amd64 / 386 +sizeof(__int32_t) == 4 +-- + +typedef __int32_t __fd_mask; + +#define FD_SETSIZE 1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) +#define __howmany(x, y) ((((x) % (y)) == 0) ? 
((x) / (y)) : (((x) / (y)) + 1)) + +typedef struct fd_set { + __fd_mask fds_bits[__howmany(__FD_SETSIZE, __NFDBITS)]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/i386-linux-gnu/sys/select.h +-- +// linux/i686 +sizeof(long int) == 4 +-- + +typedef long int __fd_mask; + +#define FD_SETSIZE 1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) + + +typedef struct fd_set { + __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/x86_64-linux-gnu/sys/select.h +-- +// linux/amd64 +sizeof(long int) == 8 +-- + +typedef long int __fd_mask; + +#define FD_SETSIZE 1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) + + +typedef struct fd_set { + __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/sys/select.h +-- +// freebsd/amd64 +sizeof(unsigned long) == 8 +-- + +typedef unsigned long __fd_mask; + +#define FD_SETSIZE 1024U +#define __NFDBITS (sizeof(__fd_mask) * 8) +#define _howmany(x, y) (((x) + ((y) - 1)) / (y)) + +typedef struct fd_set { + __fd_mask fds_bits[_howmany(FD_SETSIZE, __NFDBITS)]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ diff --git a/vendor/github.com/creack/goselect/fdset_freebsd.go b/vendor/github.com/creack/goselect/fdset_freebsd.go new file mode 100644 index 000000000..03f7b9127 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_freebsd.go @@ -0,0 +1,33 @@ +// +build freebsd + +package goselect + +import "syscall" + +const FD_SETSIZE = syscall.FD_SETSIZE + +// FDSet wraps syscall.FdSet with convenience methods +type FDSet syscall.FdSet + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + fds.X__fds_bits[fd/NFDBITS] |= (1 << (fd % NFDBITS)) +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + fds.X__fds_bits[fd/NFDBITS] &^= (1 << (fd % NFDBITS)) +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + return fds.X__fds_bits[fd/NFDBITS]&(1<<(fd%NFDBITS)) != 0 +} + +// Keep a null set to avoid reinstatiation +var nullFdSet = &FDSet{} + +// Zero empties the Set +func (fds *FDSet) Zero() { + copy(fds.X__fds_bits[:], (nullFdSet).X__fds_bits[:]) +} diff --git a/vendor/github.com/creack/goselect/fdset_unsupported.go b/vendor/github.com/creack/goselect/fdset_unsupported.go new file mode 100644 index 000000000..cbd8d5880 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_unsupported.go @@ -0,0 +1,20 @@ +// +build plan9 + +package goselect + +const FD_SETSIZE = 0 + +// FDSet wraps 
syscall.FdSet with convenience methods +type FDSet struct{} + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) {} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) {} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { return false } + +// Zero empties the Set +func (fds *FDSet) Zero() {} diff --git a/vendor/github.com/creack/goselect/fdset_windows.go b/vendor/github.com/creack/goselect/fdset_windows.go new file mode 100644 index 000000000..ee3491975 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package goselect + +import "syscall" + +const FD_SETSIZE = 64 + +// FDSet extracted from mingw libs source code +type FDSet struct { + fd_count uint + fd_array [FD_SETSIZE]uintptr +} + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + var i uint + for i = 0; i < fds.fd_count; i++ { + if fds.fd_array[i] == fd { + break + } + } + if i == fds.fd_count { + if fds.fd_count < FD_SETSIZE { + fds.fd_array[i] = fd + fds.fd_count++ + } + } +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + var i uint + for i = 0; i < fds.fd_count; i++ { + if fds.fd_array[i] == fd { + for i < fds.fd_count-1 { + fds.fd_array[i] = fds.fd_array[i+1] + i++ + } + fds.fd_count-- + break + } + } +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + if isset, err := __WSAFDIsSet(syscall.Handle(fd), fds); err == nil && isset != 0 { + return true + } + return false +} + +// Zero empties the Set +func (fds *FDSet) Zero() { + fds.fd_count = 0 +} diff --git a/vendor/github.com/creack/goselect/select.go b/vendor/github.com/creack/goselect/select.go new file mode 100644 index 000000000..7f2875e73 --- /dev/null +++ b/vendor/github.com/creack/goselect/select.go @@ -0,0 +1,28 @@ +package goselect + +import ( + "syscall" + "time" +) + +// Select wraps syscall.Select with Go types +func Select(n int, r, w, e *FDSet, timeout time.Duration) error { + var timeval *syscall.Timeval + if timeout >= 0 { + t := syscall.NsecToTimeval(timeout.Nanoseconds()) + timeval = &t + } + + return sysSelect(n, r, w, e, timeval) +} + +// RetrySelect wraps syscall.Select with Go types, and retries a number of times, with a given retryDelay. 
+func RetrySelect(n int, r, w, e *FDSet, timeout time.Duration, retries int, retryDelay time.Duration) (err error) { + for i := 0; i < retries; i++ { + if err = Select(n, r, w, e, timeout); err != syscall.EINTR { + return err + } + time.Sleep(retryDelay) + } + return err +} diff --git a/vendor/github.com/creack/goselect/select_linux.go b/vendor/github.com/creack/goselect/select_linux.go new file mode 100644 index 000000000..acd569e9d --- /dev/null +++ b/vendor/github.com/creack/goselect/select_linux.go @@ -0,0 +1,10 @@ +// +build linux + +package goselect + +import "syscall" + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + _, err := syscall.Select(n, (*syscall.FdSet)(r), (*syscall.FdSet)(w), (*syscall.FdSet)(e), timeout) + return err +} diff --git a/vendor/github.com/creack/goselect/select_other.go b/vendor/github.com/creack/goselect/select_other.go new file mode 100644 index 000000000..6c8208147 --- /dev/null +++ b/vendor/github.com/creack/goselect/select_other.go @@ -0,0 +1,9 @@ +// +build !linux,!windows,!plan9,!solaris + +package goselect + +import "syscall" + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + return syscall.Select(n, (*syscall.FdSet)(r), (*syscall.FdSet)(w), (*syscall.FdSet)(e), timeout) +} diff --git a/vendor/github.com/creack/goselect/select_unsupported.go b/vendor/github.com/creack/goselect/select_unsupported.go new file mode 100644 index 000000000..bea0ad94a --- /dev/null +++ b/vendor/github.com/creack/goselect/select_unsupported.go @@ -0,0 +1,16 @@ +// +build plan9 solaris + +package goselect + +import ( + "fmt" + "runtime" + "syscall" +) + +// ErrUnsupported . +var ErrUnsupported = fmt.Errorf("Platofrm %s/%s unsupported", runtime.GOOS, runtime.GOARCH) + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + return ErrUnsupported +} diff --git a/vendor/github.com/creack/goselect/select_windows.go b/vendor/github.com/creack/goselect/select_windows.go new file mode 100644 index 000000000..0fb70d5d2 --- /dev/null +++ b/vendor/github.com/creack/goselect/select_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package goselect + +import "syscall" + +//sys _select(nfds int, readfds *FDSet, writefds *FDSet, exceptfds *FDSet, timeout *syscall.Timeval) (total int, err error) = ws2_32.select +//sys __WSAFDIsSet(handle syscall.Handle, fdset *FDSet) (isset int, err error) = ws2_32.__WSAFDIsSet + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + _, err := _select(n, r, w, e, timeout) + return err +} diff --git a/vendor/github.com/creack/goselect/test_crosscompile.sh b/vendor/github.com/creack/goselect/test_crosscompile.sh new file mode 100755 index 000000000..533ca6647 --- /dev/null +++ b/vendor/github.com/creack/goselect/test_crosscompile.sh @@ -0,0 +1,17 @@ +export GOOS=linux; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=arm64; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=arm64; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export 
GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=386; echo $GOOS/$GOARCH; go build diff --git a/vendor/github.com/creack/goselect/zselect_windows.go b/vendor/github.com/creack/goselect/zselect_windows.go new file mode 100644 index 000000000..c3b10e1d0 --- /dev/null +++ b/vendor/github.com/creack/goselect/zselect_windows.go @@ -0,0 +1,41 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package goselect + +import "unsafe" +import "syscall" + +var _ unsafe.Pointer + +var ( + modws2_32 = syscall.NewLazyDLL("ws2_32.dll") + + procselect = modws2_32.NewProc("select") + proc__WSAFDIsSet = modws2_32.NewProc("__WSAFDIsSet") +) + +func _select(nfds int, readfds *FDSet, writefds *FDSet, exceptfds *FDSet, timeout *syscall.Timeval) (total int, err error) { + r0, _, e1 := syscall.Syscall6(procselect.Addr(), 5, uintptr(nfds), uintptr(unsafe.Pointer(readfds)), uintptr(unsafe.Pointer(writefds)), uintptr(unsafe.Pointer(exceptfds)), uintptr(unsafe.Pointer(timeout)), 0) + total = int(r0) + if total == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func __WSAFDIsSet(handle syscall.Handle, fdset *FDSet) (isset int, err error) { + r0, _, e1 := syscall.Syscall(proc__WSAFDIsSet.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(fdset)), 0) + isset = int(r0) + if isset == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 000000000..0c74e15b0 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
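For reference, a minimal sketch (not part of the vendored files) of how the `RetrySelect` helper vendored above might be called: it simply retries `Select` whenever the call is interrupted with `EINTR`, sleeping `retryDelay` between attempts. The sketch assumes the package's `FDSet` type exposes `Zero`, `Set`, and `IsSet` helpers as in upstream `creack/goselect`; those methods are not shown in this hunk.

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/creack/goselect"
)

func main() {
	// Watch stdin for readability; RetrySelect retries the underlying
	// select(2) call on EINTR, sleeping retryDelay between attempts.
	fd := os.Stdin.Fd()

	rfds := &goselect.FDSet{} // assumed helpers: Zero/Set/IsSet, as in upstream goselect
	rfds.Zero()
	rfds.Set(fd)

	// As with select(2), nfds is the highest watched descriptor plus one.
	err := goselect.RetrySelect(int(fd)+1, rfds, nil, nil, 5*time.Second, 3, 100*time.Millisecond)
	if err != nil {
		fmt.Fprintln(os.Stderr, "select failed:", err)
		return
	}
	if rfds.IsSet(fd) {
		fmt.Println("stdin is ready to read")
	} else {
		fmt.Println("timed out waiting for stdin")
	}
}
```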
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 000000000..d732a4795 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,608 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "vigorous", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. 
+ "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. 
(https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. 
+ "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. 
https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. 
https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. 
https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. 
- https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. 
https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. 
If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go new file mode 100644 index 000000000..70de4d130 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 000000000..8d9a94a90 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
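For reference, a minimal sketch (not part of the vendored files) of how the names generator vendored above behaves, based on its own doc comment: `GetRandomName(0)` yields an `adjective_surname` pair such as `focused_turing`, and a non-zero retry appends a single digit such as `focused_turing3`.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/namesgenerator"
)

func main() {
	// First attempt: e.g. "focused_turing".
	fmt.Println(namesgenerator.GetRandomName(0))

	// retry > 0 appends a random digit (0-9), e.g. "focused_turing3",
	// useful when the first name collides with an existing resource.
	fmt.Println(namesgenerator.GetRandomName(1))
}
```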
+ + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 000000000..f69d3c870 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,92 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. 
+ +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 000000000..f49dc337d --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 000000000..1a2bf6172 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,173 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + // Without suffix + 
"": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 000000000..0b498f488 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. 
+const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 000000000..eb285cb95 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,108 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // minin64 can't be negated to a usable value, so it has to be special cased. 
+ if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. +func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 000000000..620690dec --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,40 @@ +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 000000000..c76190b10 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,23 @@ +package humanize + +import "strconv" + +func stripTrailingZeros(s string) string { + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +// Ftoa converts a float to a string with no trailing zeros. 
+func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 000000000..a2c2da31e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). +*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 000000000..dec618659 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < -math.MaxFloat64 { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." 
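+	// the remaining defaults below reproduce the "" (default format) example above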
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 000000000..43d88a861 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
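+// Eleventh through thirteenth keep the "th" suffix: Ordinal(11) -> 11th.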
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 000000000..b24e48169 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,113 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 000000000..b311f11c8 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. 
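+// The result is phrased relative to time.Now().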
+// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. +func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D >= diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..b003eca0c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. 
+ +Gary Burd +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..33c3d2be3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
+|  | github.com/gorilla | golang.org/x/net |
+| --- | --- | --- |
+| **RFC 6455 Features** | | |
+| Passes Autobahn Test Suite | Yes | No |
+| Receive fragmented message | Yes | No, see note 1 |
+| Send close message | Yes | No |
+| Send pings and receive pongs | Yes | No |
+| Get the type of a received data message | Yes | Yes, see note 2 |
+| **Other Features** | | |
+| Compression Extensions | Experimental | No |
+| Read message using io.Reader | Yes | No, see note 3 |
+| Write message using io.WriteCloser | Yes | No, see note 3 |
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..43a87c753 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,392 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. 
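+	// The negotiated choice, if any, is echoed back by the server in the
+	// Sec-WebSocket-Protocol response header.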
+ Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. +// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + + if i := strings.Index(s, "/"); i >= 0 { + u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" + } + + u.Host = s + + if strings.Contains(u.Host, "@") { + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. + return nil, errMalformedURL + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &Dialer{ + Proxy: http.ProxyFromEnvironment, + } + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. 
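+		// (RFC 6455, section 3, defines ws/wss URIs without a userinfo component)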
+ return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") + } + + hostPort, hostNoPort := hostPortNoPort(u) + + var proxyURL *url.URL + // Check wether the proxy method has been configured + if d.Proxy != nil { + proxyURL, err = d.Proxy(req) + } + if err != nil { + return nil, nil, err + } + + var targetHostPort string + if proxyURL != nil { + targetHostPort, _ = hostPortNoPort(proxyURL) + } else { + targetHostPort = hostPort + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + netConn, err := netDial("tcp", targetHostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if err := netConn.SetDeadline(deadline); err != nil { + return nil, nil, err + } + + if proxyURL != nil { + connectHeader := make(http.Header) + if user := proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: hostPort}, + Host: hostPort, + Header: connectHeader, + } + + connectReq.Write(netConn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. 
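+		// (so the buffer cannot hold any bytes beyond the proxy's CONNECT response when it is discarded)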
+ br := bufio.NewReader(netConn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + return nil, nil, errors.New(f[1]) + } + } + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 000000000..4f0d94372 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 000000000..babb007fb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. 
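+//
+// On Go 1.8 and later the build-tagged variant in client_clone.go simply
+// delegates to tls.Config.Clone instead.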
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..813ffb1e8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
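+	// w.p holds back the most recent (up to) four bytes, which are the deflate
+	// sync marker and are never forwarded to the underlying writer.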
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..97e1dbacb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1149 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. 
+ CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents close frame. +type CloseError struct { + + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. 
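+//
+// e.g. IsUnexpectedCloseError(err, CloseGoingAway) is true when err is a
+// *CloseError carrying any code other than CloseGoingAway.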
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) +} + +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { + mu := make(chan bool, 1) + mu <- true + + var br *bufio.Reader + if readBufferSize == 0 && brw != nil && brw.Reader != nil { + // Reuse the supplied bufio.Reader if the buffer has a useful size. + // This code assumes that peek on a reader returns + // bufio.Reader.buf[:0]. + brw.Reader.Reset(conn) + if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { + br = brw.Reader + } + } + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + var writeBuf []byte + if writeBufferSize == 0 && brw != nil && brw.Writer != nil { + // Use the bufio.Writer's buffer if the buffer has a useful size. This + // code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + brw.Writer.Reset(&wh) + brw.Writer.WriteByte(0) + brw.Flush() + if cap(wh.p) >= maxFrameHeaderSize+256 { + writeBuf = wh.p[:cap(wh.p)] + } + } + + if writeBuf == nil { + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) + } + + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting for a close frame. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { + <-c.mu + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + for _, buf := range bufs { + if len(buf) > 0 { + _, err := c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + } + } + + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + return err +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. 
+ err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.fatal(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
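+		// (server side only: p is passed to flushFrame as the extra payload and
+		// written directly, since no client-side masking is needed)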
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
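+	// byte 0: FIN and RSV flags plus the opcode; byte 1: mask flag plus the
+	// 7-bit payload length (RFC 6455, section 5.2).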
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. 
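+//
+// A rough sketch of such a handler (illustrative only; conn is an assumed
+// *Conn and releaseSessionState is a placeholder for application cleanup):
+//
+//	conn.SetCloseHandler(func(code int, text string) error {
+//		releaseSessionState() // hypothetical application-specific cleanup
+//		message := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
+//		return conn.WriteControl(websocket.CloseMessage, message, time.Now().Add(time.Second))
+//	})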
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		h = func(code int, text string) error {
+			message := []byte{}
+			if code != CloseNoStatusReceived {
+				message = FormatCloseMessage(code, "")
+			}
+			c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+			return nil
+		}
+	}
+	c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING frame application data. The default
+// ping handler sends a pong to the peer.
+//
+// The application must read the connection to process ping messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG frame application data. The default
+// pong handler does nothing.
+//
+// The application must read the connection to process pong messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go
new file mode 100644
index 000000000..1ea15059e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_read.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go new file mode 100644 index 000000000..018541cf6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if len(p) > 0 { + // advance over the bytes just read + io.ReadFull(c.br, p) + } + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..e291a952c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application uses +// the Upgrade function from an Upgrader object with a HTTP request handler +// to get a pointer to a Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// return +// } +// if err = conn.WriteMessage(messageType, p); err != nil { +// return err +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. 
+
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by sending a close message to the
+// peer and returning a *CloseError from the NextReader, ReadMessage or the
+// message Read method.
+//
+// Connections handle received ping and pong messages by invoking callback
+// functions set with SetPingHandler and SetPongHandler methods. The callback
+// functions are called from the NextReader, ReadMessage and the message Read
+// methods.
+//
+// The default ping handler sends a pong to the peer. The application's reading
+// goroutine can block for a short time while the handler writes the pong data
+// to the connection.
+//
+// The application must read the connection to process ping, pong and close
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+//	func readLoop(c *websocket.Conn) {
+//		for {
+//			if _, _, err := c.NextReader(); err != nil {
+//				c.Close()
+//				break
+//			}
+//		}
+//	}
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and not equal to the
+// Host request header.
+//
+// An application can allow connections from any origin by specifying a
+// function that always returns true:
+//
+//	var upgrader = websocket.Upgrader{
+//		CheckOrigin: func(r *http.Request) bool { return true },
+//	}
+//
+// The deprecated Upgrade function does not enforce an origin policy. It's the
+// application's responsibility to check the Origin header before calling
+// Upgrade.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. 
Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..4f0e36875 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..6a88bbc74 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. 
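+	// Masking the leading bytes individually aligns &b[0] on a word boundary,
+	// so the loop further down can XOR one full machine word per iteration.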
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..1efffbd1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
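+		// The Conn built below writes into an in-memory prepareConn buffer
+		// rather than a real network connection, so WriteMessage produces the
+		// frame's wire bytes without any I/O; mu is the buffered-channel write
+		// mutex that Conn expects.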
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..3495e0f1a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,291 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
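+//
+// Applications that need a stricter policy can assign their own
+// Upgrader.CheckOrigin function; a rough sketch, where the allowed origin is
+// an assumed example value:
+//
+//	upgrader.CheckOrigin = func(r *http.Request) bool {
+//		origin := r.Header.Get("Origin")
+//		return origin == "" || origin == "https://app.example.com"
+//	}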
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + var ( + netConn net.Conn + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = 
compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. 
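+//
+// For example, a handler on the deprecated Upgrade path might select the first
+// protocol it supports (a sketch; "chat.v1" is an assumed protocol name):
+//
+//	responseHeader := http.Header{}
+//	for _, p := range websocket.Subprotocols(r) {
+//		if p == "chat.v1" {
+//			responseHeader.Set("Sec-Websocket-Protocol", p)
+//			break
+//		}
+//	}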
+func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..9a4908df2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,214 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j += 1 + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j += 1 + } + } + return "", "" + } + } + return "", "" +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. 
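+//
+// For example, a request carrying "Connection: keep-alive, Upgrade" matches
+// tokenListContainsValue(header, "Connection", "upgrade"): values are split on
+// commas and individual tokens are compared case-insensitively.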
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if strings.EqualFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensiosn parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/moul/anonuuid/LICENSE b/vendor/github.com/moul/anonuuid/LICENSE new file mode 100644 index 000000000..492e2c629 --- /dev/null +++ b/vendor/github.com/moul/anonuuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Manfred Touron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/moul/anonuuid/Makefile b/vendor/github.com/moul/anonuuid/Makefile new file mode 100644 index 000000000..4e2f3bb44 --- /dev/null +++ b/vendor/github.com/moul/anonuuid/Makefile @@ -0,0 +1,73 @@ +# Project-specific variables +BINARIES ?= anonuuid +CONVEY_PORT ?= 9042 + + +# Common variables +SOURCES := $(shell find . -name "*.go") +COMMANDS := $(shell go list ./... | grep -v /vendor/ | grep /cmd/) +PACKAGES := $(shell go list ./... 
| grep -v /vendor/ | grep -v /cmd/)
+GOENV ?= GO15VENDOREXPERIMENT=1
+GO ?= $(GOENV) go
+GODEP ?= $(GOENV) godep
+USER ?= $(shell whoami)
+
+
+all: build
+
+
+.PHONY: build
+build: $(BINARIES)
+
+
+$(BINARIES): $(SOURCES)
+	$(GO) build -o $@ ./cmd/$@
+
+
+.PHONY: test
+test:
+	$(GO) get -t .
+	$(GO) test -v .
+
+
+.PHONY: godep-save
+godep-save:
+	$(GODEP) save $(PACKAGES) $(COMMANDS)
+
+
+.PHONY: clean
+clean:
+	rm -f $(BINARIES)
+
+
+.PHONY: re
+re: clean all
+
+
+.PHONY: convey
+convey:
+	$(GO) get github.com/smartystreets/goconvey
+	goconvey -cover -port=$(CONVEY_PORT) -workDir="$(realpath .)" -depth=1
+
+
+.PHONY: cover
+cover: profile.out
+
+
+profile.out: $(SOURCES)
+	rm -f $@
+	$(GO) test -covermode=count -coverpkg=. -coverprofile=$@ .
+
+
+.PHONY: docker-build
+docker-build:
+	go get github.com/laher/goxc
+	rm -rf contrib/docker/linux_386
+	for binary in $(BINARIES); do \
+		goxc -bc="linux,386" -d . -pv contrib/docker -n $$binary xc; \
+		mv contrib/docker/linux_386/$$binary contrib/docker/entrypoint; \
+		docker build -t $(USER)/$$binary contrib/docker; \
+		docker run -it --rm $(USER)/$$binary || true; \
+		docker inspect --type=image --format="{{ .Id }}" moul/$$binary || true; \
+		echo "Now you can run 'docker push $(USER)/$$binary'"; \
+	done
diff --git a/vendor/github.com/moul/anonuuid/README.md b/vendor/github.com/moul/anonuuid/README.md
new file mode 100644
index 000000000..f5c9a57eb
--- /dev/null
+++ b/vendor/github.com/moul/anonuuid/README.md
@@ -0,0 +1,170 @@
+# AnonUUID
+
+[![Build Status](https://travis-ci.org/moul/anonuuid.svg)](https://travis-ci.org/moul/anonuuid)
+[![GoDoc](https://godoc.org/github.com/moul/anonuuid?status.svg)](https://godoc.org/github.com/moul/anonuuid)
+[![Coverage Status](https://coveralls.io/repos/moul/anonuuid/badge.svg?branch=master&service=github)](https://coveralls.io/github/moul/anonuuid?branch=master)
+
+:wrench: Anonymize UUIDs outputs (written in Golang)
+
+![AnonUUID Logo](https://raw.githubusercontent.com/moul/anonuuid/master/assets/anonuuid.png)
+
+**anonuuid** anonymizes an input string by replacing all UUIDs with anonymized
+new ones.
+
+The fake UUIDs are cached, so if AnonUUID encounters the same real UUID multiple
+times, the translation will be the same.
+
+## Usage
+
+```console
+$ anonuuid --help
+NAME:
+   anonuuid - Anonymize UUIDs outputs
+
+USAGE:
+   anonuuid [global options] command [command options] [arguments...]
+
+VERSION:
+   1.0.0-dev
+
+AUTHOR(S):
+   Manfred Touron
+
+COMMANDS:
+   help, h	Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+   --hexspeak		Generate hexspeak style fake UUIDs
+   --random, -r		Generate random fake UUIDs
+   --keep-beginning	Keep first part of the UUID unchanged
+   --keep-end		Keep last part of the UUID unchanged
+   --prefix, -p		Prefix generated UUIDs
+   --suffix		Suffix generated UUIDs
+   --help, -h		show help
+   --version, -v	print the version
+```
+
+## Example
+
+Replace all UUIDs and cache the correspondence.
+ +```command +$ anonuuid git:(master) ✗ cat < 32 { + part = part[:32] + } + uuid := part[:8] + "-" + part[8:12] + "-1" + part[13:16] + "-" + part[16:20] + "-" + part[20:32] + + err := IsUUID(uuid) + if err != nil { + return "", err + } + + return uuid, nil +} + +// GenerateRandomUUID returns an UUID based on random strings +func GenerateRandomUUID(length int) (string, error) { + var letters = []rune("abcdef0123456789") + + b := make([]rune, length) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return FormatUUID(string(b)) +} + +// GenerateHexspeakUUID returns an UUID formatted string containing hexspeak words +func GenerateHexspeakUUID(i int) (string, error) { + if i < 0 { + i = -i + } + hexspeaks := []string{ + "0ff1ce", + "31337", + "4b1d", + "badc0de", + "badcafe", + "badf00d", + "deadbabe", + "deadbeef", + "deadc0de", + "deadfeed", + "fee1bad", + } + return FormatUUID(hexspeaks[i%len(hexspeaks)]) +} + +// GenerateLenUUID returns an UUID formatted string based on an index number +func GenerateLenUUID(i int) (string, error) { + if i < 0 { + i = 2<<29 + i + } + return FormatUUID(fmt.Sprintf("%x", i)) +} diff --git a/vendor/github.com/moul/gotty-client/LICENSE b/vendor/github.com/moul/gotty-client/LICENSE new file mode 100644 index 000000000..492e2c629 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Manfred Touron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/moul/gotty-client/Makefile b/vendor/github.com/moul/gotty-client/Makefile new file mode 100644 index 000000000..4952ef990 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/Makefile @@ -0,0 +1,81 @@ +# Project-specific variables +BINARIES ?= gotty-client +GOTTY_URL := http://localhost:8081/ +VERSION := $(shell cat .goxc.json | jq -c .PackageVersion | sed 's/"//g') + +CONVEY_PORT ?= 9042 + + +# Common variables +SOURCES := $(shell find . -type f -name "*.go") +COMMANDS := $(shell go list ./... | grep -v /vendor/ | grep /cmd/) +PACKAGES := $(shell go list ./... | grep -v /vendor/ | grep -v /cmd/) +GOENV ?= GO15VENDOREXPERIMENT=1 +GO ?= $(GOENV) go +GODEP ?= $(GOENV) godep +USER ?= $(shell whoami) + + +all: build + + +.PHONY: build +build: $(BINARIES) + + +.PHONY: install +install: + $(GO) install ./cmd/gotty-client + + +$(BINARIES): $(SOURCES) + $(GO) build -o $@ ./cmd/$@ + + +.PHONY: test +test: + $(GO) get -t . + $(GO) test -v . 
+ + +.PHONY: godep-save +godep-save: + $(GODEP) save $(PACKAGES) $(COMMANDS) + + +.PHONY: clean +clean: + rm -f $(BINARIES) + + +.PHONY: re +re: clean all + + +.PHONY: convey +convey: + $(GO) get github.com/smartystreets/goconvey + goconvey -cover -port=$(CONVEY_PORT) -workDir="$(realpath .)" -depth=1 + + +.PHONY: cover +cover: profile.out + + +profile.out: $(SOURCES) + rm -f $@ + $(GO) test -covermode=count -coverpkg=. -coverprofile=$@ . + + +.PHONY: docker-build +docker-build: + go get github.com/laher/goxc + rm -rf contrib/docker/linux_386 + for binary in $(BINARIES); do \ + goxc -bc="linux,386" -d . -pv contrib/docker -n $$binary xc; \ + mv contrib/docker/linux_386/$$binary contrib/docker/entrypoint; \ + docker build -t $(USER)/$$binary contrib/docker; \ + docker run -it --rm $(USER)/$$binary || true; \ + docker inspect --type=image --format="{{ .Id }}" moul/$$binary || true; \ + echo "Now you can run 'docker push $(USER)/$$binary'"; \ + done diff --git a/vendor/github.com/moul/gotty-client/README.md b/vendor/github.com/moul/gotty-client/README.md new file mode 100644 index 000000000..2fb562b32 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/README.md @@ -0,0 +1,201 @@ +# gotty-client +:wrench: Terminal client for [GoTTY](https://github.com/yudai/gotty). + +![](https://raw.githubusercontent.com/moul/gotty-client/master/resources/gotty-client.png) + +[![Build Status](https://travis-ci.org/moul/gotty-client.svg?branch=master)](https://travis-ci.org/moul/gotty-client) +[![GoDoc](https://godoc.org/github.com/moul/gotty-client?status.svg)](https://godoc.org/github.com/moul/gotty-client) + +```ruby + ┌─────────────────┐ + ┌──────▶│ /bin/bash │ + │ └─────────────────┘ + ┌──────────────┐ ┌──────────┐ + │ │ │ Gotty │ +┌───────┐ ┌──▶│ Browser │───────┐ │ │ +│ │ │ │ │ │ │ │ +│ │ │ └──────────────┘ │ │ │ ┌─────────────────┐ +│ Bob │───┤ websockets─▶│ │─▶│ emacs /var/www │ +│ │ │ ╔═ ══ ══ ══ ══ ╗ │ │ │ └─────────────────┘ +│ │ │ ║ ║ │ │ │ +└───────┘ └──▶║ gotty-client ───────┘ │ │ + ║ │ │ + ╚═ ══ ══ ══ ══ ╝ └──────────┘ + │ ┌─────────────────┐ + └──────▶│ tmux attach │ + └─────────────────┘ +``` + +## Example + +Server side ([GoTTY](https://github.com/yudai/gotty)) + +```console +$ gotty -p 9191 sh -c 'while true; do date; sleep 1; done' +2015/08/24 18:54:31 Server is starting with command: sh -c while true; do date; sleep 1; done +2015/08/24 18:54:31 URL: http://[::1]:9191/ +2015/08/24 18:54:34 GET /ws +2015/08/24 18:54:34 New client connected: 127.0.0.1:61811 +2015/08/24 18:54:34 Command is running for client 127.0.0.1:61811 with PID 64834 +2015/08/24 18:54:39 Command exited for: 127.0.0.1:61811 +2015/08/24 18:54:39 Connection closed: 127.0.0.1:61811 +... 
+``` + +**Client side** + +```console +$ gotty-client http://localhost:9191/ +INFO[0000] New title: GoTTY - sh -c while true; do date; sleep 1; done (jean-michel-van-damme.local) +WARN[0000] Unhandled protocol message: json pref: 2{} +Mon Aug 24 18:54:34 CEST 2015 +Mon Aug 24 18:54:35 CEST 2015 +Mon Aug 24 18:54:36 CEST 2015 +Mon Aug 24 18:54:37 CEST 2015 +Mon Aug 24 18:54:38 CEST 2015 +^C +``` + +## Usage + +```console +$ gotty-client -h +NAME: + gotty-client - GoTTY client for your terminal + +USAGE: + gotty-client [global options] command [command options] GOTTY_URL + +VERSION: + 1.3.0+ + +AUTHOR(S): + Manfred Touron + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --debug, -D Enable debug mode [$GOTTY_CLIENT_DEBUG] + --help, -h show help + --version, -v print the version +``` + +## Install + +Install latest version using Golang (recommended) + +```console +$ go get github.com/moul/gotty-client/cmd/gotty-client +``` + +--- + +Install latest version using Homebrew (Mac OS X) + +```console +$ brew install https://raw.githubusercontent.com/moul/gotty-client/master/contrib/homebrew/gotty-client.rb --HEAD +``` + +or the latest released version + +```console +$ brew install https://raw.githubusercontent.com/moul/gotty-client/master/contrib/homebrew/gotty-client.rb +``` + +## Changelog + +### master (unreleased) + +* No entry + +[full commits list](https://github.com/moul/gotty-client/compare/v1.6.1...master) + +### [v1.6.1](https://github.com/moul/gotty-client/releases/tag/v1.6.1) (2017-01-19) + +* Do not exit on EOF ([#45](https://github.com/moul/gotty-client/pull/45)) ([@gurjeet](https://github.com/gurjeet)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.6.0...v1.6.1) + +### [v1.6.0](https://github.com/moul/gotty-client/releases/tag/v1.6.0) (2016-05-23) + +* Support of `--use-proxy-from-env` (Add Proxy support) ([#36](https://github.com/moul/gotty-client/pull/36)) ([@byung2](https://github.com/byung2)) +* Add debug mode ([#18](https://github.com/moul/gotty-client/issues/18)) +* Fix argument passing ([#16](https://github.com/moul/gotty-client/issues/16)) +* Remove warnings + golang fixes and refactoring ([@QuentinPerez](https://github.com/QuentinPerez)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.5.0...v1.6.0) + +### [v1.5.0](https://github.com/moul/gotty-client/releases/tag/v1.5.0) (2016-02-18) + +* Add autocomplete support ([#19](https://github.com/moul/gotty-client/issues/19)) +* Switch from `Party` to `Godep` +* Fix terminal data being interpreted as format string ([#34](https://github.com/moul/gotty-client/pull/34)) ([@mickael9](https://github.com/mickael9)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.4.0...v1.5.0) + +### [v1.4.0](https://github.com/moul/gotty-client/releases/tag/v1.4.0) (2015-12-09) + +* Remove solaris,plan9,nacl for `.goxc.json` +* Add an error if the go version is lower than 1.5 +* Flexible parsing of the input URL +* Add tests +* Support of `--skip-tls-verify` + +[full commits list](https://github.com/moul/gotty-client/compare/v1.3.0...v1.4.0) + +### [v1.3.0](https://github.com/moul/gotty-client/releases/tag/v1.3.0) (2015-10-27) + +* Fix `connected` state when using `Connect()` + `Loop()` methods +* Add `ExitLoop` which allow to exit from `Loop` function + +[full commits list](https://github.com/moul/gotty-client/compare/v1.2.0...v1.3.0) + +### [v1.2.0](https://github.com/moul/gotty-client/releases/tag/v1.2.0) (2015-10-23) + +* Removed an 
annoying warning when exiting connection ([#22](https://github.com/moul/gotty-client/issues/22)) ([@QuentinPerez](https://github.com/QuentinPerez)) +* Add the ability to configure alternative stdout ([#21](https://github.com/moul/gotty-client/issues/21)) ([@QuentinPerez](https://github.com/QuentinPerez)) +* Refactored the goroutine system with select, improve speed and stability ([@QuentinPerez](https://github.com/QuentinPerez)) +* Add debug mode (`--debug`/`-D`) ([#18](https://github.com/moul/gotty-client/issues/18)) +* Improve error message when connecting by checking HTTP status code +* Fix arguments passing ([#16](https://github.com/moul/gotty-client/issues/16)) +* Dropped support for golang<1.5 +* Small fixes + +[full commits list](https://github.com/moul/gotty-client/compare/v1.1.0...v1.2.0) + +### [v1.1.0](https://github.com/moul/gotty-client/releases/tag/v1.1.0) (2015-10-10) + +* Handling arguments + using mutexes (thanks to [@QuentinPerez](https://github.com/QuentinPerez)) +* Add logo ([#9](https://github.com/moul/gotty-client/issues/9)) +* Using codegansta/cli for CLI parsing ([#3](https://github.com/moul/gotty-client/issues/3)) +* Fix panic when running on older GoTTY server ([#13](https://github.com/moul/gotty-client/issues/13)) +* Add 'homebrew support' ([#1](https://github.com/moul/gotty-client/issues/1)) +* Add Changelog ([#5](https://github.com/moul/gotty-client/issues/5)) +* Add GOXC configuration to build binaries for multiple architectures ([#2](https://github.com/moul/gotty-client/issues/2)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.0.1...v1.1.0) + +### [v1.0.1](https://github.com/moul/gotty-client/releases/tag/v1.0.1) (2015-09-27) + +* Using party to manage dependencies + +[full commits list](https://github.com/moul/gotty-client/compare/v1.0.0...v1.0.1) + +### [v1.0.0](https://github.com/moul/gotty-client/releases/tag/v1.0.0) (2015-09-27) + +Compatible with [GoTTY](https://github.com/yudai/gotty) version: [v0.0.10](https://github.com/yudai/gotty/releases/tag/v0.0.10) + +#### Features + +* Support **basic-auth** +* Support **terminal-(re)size** +* Support **write** +* Support **title** +* Support **custom URI** + +[full commits list](https://github.com/moul/gotty-client/compare/cf0c1146c7ce20fe0bd65764c13253bc575cd43a...v1.0.0) + +## License + +MIT diff --git a/vendor/github.com/moul/gotty-client/arch.go b/vendor/github.com/moul/gotty-client/arch.go new file mode 100644 index 000000000..d856fbd2c --- /dev/null +++ b/vendor/github.com/moul/gotty-client/arch.go @@ -0,0 +1,34 @@ +// +build !windows + +package gottyclient + +import ( + "encoding/json" + "fmt" + "os" + "os/signal" + "syscall" + "unsafe" +) + +func notifySignalSIGWINCH(c chan<- os.Signal) { + signal.Notify(c, syscall.SIGWINCH) +} + +func resetSignalSIGWINCH() { + signal.Reset(syscall.SIGWINCH) +} + +func syscallTIOCGWINSZ() ([]byte, error) { + ws := winsize{} + + syscall.Syscall(syscall.SYS_IOCTL, + uintptr(0), uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(&ws))) + + b, err := json.Marshal(ws) + if err != nil { + return nil, fmt.Errorf("json.Marshal error: %v", err) + } + return b, err +} diff --git a/vendor/github.com/moul/gotty-client/arch_windows.go b/vendor/github.com/moul/gotty-client/arch_windows.go new file mode 100644 index 000000000..53aaffc5f --- /dev/null +++ b/vendor/github.com/moul/gotty-client/arch_windows.go @@ -0,0 +1,16 @@ +package gottyclient + +import ( + "errors" + "os" +) + +func notifySignalSIGWINCH(c chan<- os.Signal) { +} + +func 
resetSignalSIGWINCH() { +} + +func syscallTIOCGWINSZ() ([]byte, error) { + return nil, errors.New("SIGWINCH isn't supported on this ARCH") +} diff --git a/vendor/github.com/moul/gotty-client/glide.lock b/vendor/github.com/moul/gotty-client/glide.lock new file mode 100644 index 000000000..61d5aeee3 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/glide.lock @@ -0,0 +1,37 @@ +hash: 5ba4ef945563e8e85097f2699126b1577f9c667fdbbcdd71604e553ab7dd2a03 +updated: 2017-02-04T23:03:32.03375088+01:00 +imports: +- name: github.com/codegangsta/cli + version: 2ae9042f5bcbaf15b01229f8395ba8e72e01bded +- name: github.com/creack/goselect + version: 40085cf5fd629ccd88dc328895f1f137d09a1de4 +- name: github.com/gopherjs/gopherjs + version: db27c7c470d7404b6b201f82d6fd4821260bd13e + subpackages: + - js +- name: github.com/gorilla/websocket + version: 1f512fc3f05332ba7117626cdfb4e07474e58e60 +- name: github.com/jtolds/gls + version: 8ddce2a84170772b95dd5d576c48d517b22cac63 +- name: github.com/Sirupsen/logrus + version: cd7d1bbe41066b6c1f19780f895901052150a575 +- name: github.com/smartystreets/assertions + version: 40711f7748186bbf9c99977cd89f21ce1a229447 + subpackages: + - internal/go-render/render + - internal/oglematchers +- name: github.com/smartystreets/goconvey + version: d4c757aa9afd1e2fc1832aaab209b5794eb336e1 + subpackages: + - convey + - convey/gotest + - convey/reporting +- name: golang.org/x/crypto + version: 5bcd134fee4dd1475da17714aac19c0aa0142e2f + subpackages: + - ssh/terminal +- name: golang.org/x/sys + version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 + subpackages: + - unix +testImports: [] diff --git a/vendor/github.com/moul/gotty-client/glide.yaml b/vendor/github.com/moul/gotty-client/glide.yaml new file mode 100644 index 000000000..287efb9d8 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/glide.yaml @@ -0,0 +1,35 @@ +package: github.com/moul/gotty-client +import: +- package: github.com/Sirupsen/logrus + version: cd7d1bbe41066b6c1f19780f895901052150a575 +- package: github.com/codegangsta/cli + version: 2ae9042f5bcbaf15b01229f8395ba8e72e01bded +- package: github.com/creack/goselect + version: 40085cf5fd629ccd88dc328895f1f137d09a1de4 +- package: github.com/gopherjs/gopherjs + version: db27c7c470d7404b6b201f82d6fd4821260bd13e + subpackages: + - js +- package: github.com/gorilla/websocket + version: 1f512fc3f05332ba7117626cdfb4e07474e58e60 +- package: github.com/jtolds/gls + version: 8ddce2a84170772b95dd5d576c48d517b22cac63 +- package: github.com/smartystreets/assertions + version: 40711f7748186bbf9c99977cd89f21ce1a229447 + subpackages: + - internal/go-render/render + - internal/oglematchers +- package: github.com/smartystreets/goconvey + version: d4c757aa9afd1e2fc1832aaab209b5794eb336e1 + subpackages: + - convey + - convey/gotest + - convey/reporting +- package: golang.org/x/crypto + version: 5bcd134fee4dd1475da17714aac19c0aa0142e2f + subpackages: + - ssh/terminal +- package: golang.org/x/sys + version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 + subpackages: + - unix diff --git a/vendor/github.com/moul/gotty-client/gotty-client.go b/vendor/github.com/moul/gotty-client/gotty-client.go new file mode 100644 index 000000000..35e599d9b --- /dev/null +++ b/vendor/github.com/moul/gotty-client/gotty-client.go @@ -0,0 +1,469 @@ +package gottyclient + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/creack/goselect" + 
"github.com/gorilla/websocket" + "golang.org/x/crypto/ssh/terminal" +) + +// GetAuthTokenURL transforms a GoTTY http URL to its AuthToken file URL +func GetAuthTokenURL(httpURL string) (*url.URL, *http.Header, error) { + header := http.Header{} + target, err := url.Parse(httpURL) + if err != nil { + return nil, nil, err + } + + target.Path = strings.TrimLeft(target.Path+"auth_token.js", "/") + + if target.User != nil { + header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(target.User.String()))) + target.User = nil + } + + return target, &header, nil +} + +// GetURLQuery returns url.query +func GetURLQuery(rawurl string) (url.Values, error) { + target, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + return target.Query(), nil +} + +// GetWebsocketURL transforms a GoTTY http URL to its WebSocket URL +func GetWebsocketURL(httpURL string) (*url.URL, *http.Header, error) { + header := http.Header{} + target, err := url.Parse(httpURL) + if err != nil { + return nil, nil, err + } + + if target.Scheme == "https" { + target.Scheme = "wss" + } else { + target.Scheme = "ws" + } + + target.Path = strings.TrimLeft(target.Path+"ws", "/") + + if target.User != nil { + header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(target.User.String()))) + target.User = nil + } + + return target, &header, nil +} + +type Client struct { + Dialer *websocket.Dialer + Conn *websocket.Conn + URL string + WriteMutex *sync.Mutex + Output io.Writer + poison chan bool + SkipTLSVerify bool + UseProxyFromEnv bool + Connected bool +} + +type querySingleType struct { + AuthToken string `json:"AuthToken"` + Arguments string `json:"Arguments"` +} + +func (c *Client) write(data []byte) error { + c.WriteMutex.Lock() + defer c.WriteMutex.Unlock() + return c.Conn.WriteMessage(websocket.TextMessage, data) +} + +// GetAuthToken retrieves an Auth Token from dynamic auth_token.js file +func (c *Client) GetAuthToken() (string, error) { + target, header, err := GetAuthTokenURL(c.URL) + if err != nil { + return "", err + } + + logrus.Debugf("Fetching auth token auth-token: %q", target.String()) + req, err := http.NewRequest("GET", target.String(), nil) + req.Header = *header + tr := &http.Transport{} + if c.SkipTLSVerify { + conf := &tls.Config{InsecureSkipVerify: true} + tr.TLSClientConfig = conf + } + if c.UseProxyFromEnv { + tr.Proxy = http.ProxyFromEnvironment + } + client := &http.Client{Transport: tr} + resp, err := client.Do(req) + if err != nil { + return "", err + } + + switch resp.StatusCode { + case 200: + // Everything is OK + default: + return "", fmt.Errorf("unknown status code: %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + re := regexp.MustCompile("var gotty_auth_token = '(.*)'") + output := re.FindStringSubmatch(string(body)) + if len(output) == 0 { + return "", fmt.Errorf("Cannot fetch GoTTY auth-token, please upgrade your GoTTY server.") + } + + return output[1], nil +} + +// Connect tries to dial a websocket server +func (c *Client) Connect() error { + // Retrieve AuthToken + authToken, err := c.GetAuthToken() + if err != nil { + return err + } + logrus.Debugf("Auth-token: %q", authToken) + + // Open WebSocket connection + target, header, err := GetWebsocketURL(c.URL) + if err != nil { + return err + } + logrus.Debugf("Connecting to websocket: %q", target.String()) + if c.SkipTLSVerify { + c.Dialer.TLSClientConfig = 
&tls.Config{InsecureSkipVerify: true} + } + if c.UseProxyFromEnv { + c.Dialer.Proxy = http.ProxyFromEnvironment + } + conn, _, err := c.Dialer.Dial(target.String(), *header) + if err != nil { + return err + } + c.Conn = conn + c.Connected = true + + // Pass arguments and auth-token + query, err := GetURLQuery(c.URL) + if err != nil { + return err + } + querySingle := querySingleType{ + Arguments: "?" + query.Encode(), + AuthToken: authToken, + } + json, err := json.Marshal(querySingle) + if err != nil { + logrus.Errorf("Failed to parse init message %v", err) + return err + } + // Send Json + logrus.Debugf("Sending arguments and auth-token") + err = c.write(json) + if err != nil { + return err + } + + go c.pingLoop() + + return nil +} + +func (c *Client) pingLoop() { + for { + logrus.Debugf("Sending ping") + c.write([]byte("1")) + time.Sleep(30 * time.Second) + } +} + +// Close will nicely close the dialer +func (c *Client) Close() { + c.Conn.Close() +} + +// ExitLoop will kill all goroutines launched by c.Loop() +// ExitLoop() -> wait Loop() -> Close() +func (c *Client) ExitLoop() { + fname := "ExitLoop" + openPoison(fname, c.poison) +} + +// Loop will look indefinitely for new messages +func (c *Client) Loop() error { + if !c.Connected { + err := c.Connect() + if err != nil { + return err + } + } + + wg := &sync.WaitGroup{} + + wg.Add(1) + go c.termsizeLoop(wg) + + wg.Add(1) + go c.readLoop(wg) + + wg.Add(1) + go c.writeLoop(wg) + + /* Wait for all of the above goroutines to finish */ + wg.Wait() + + logrus.Debug("Client.Loop() exiting") + return nil +} + +type winsize struct { + Rows uint16 `json:"rows"` + Columns uint16 `json:"columns"` + // unused + x uint16 + y uint16 +} + +type posionReason int + +const ( + committedSuicide = iota + killed +) + +func openPoison(fname string, poison chan bool) posionReason { + logrus.Debug(fname + " suicide") + + /* + * The close() may raise panic if multiple goroutines commit suicide at the + * same time. Prevent that panic from bubbling up. 
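+	 * Closing the channel acts as a broadcast: the other loops' select on
+	 * <-c.poison fires once it is closed and each returns through die(), so a
+	 * failure in any of termsizeLoop, readLoop or writeLoop tears the others
+	 * down as well.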
+ */ + defer func() { + if r := recover(); r != nil { + logrus.Debug("Prevented panic() of simultaneous suicides", r) + } + }() + + /* Signal others to die */ + close(poison) + return committedSuicide +} + +func die(fname string, poison chan bool) posionReason { + logrus.Debug(fname + " died") + + wasOpen := <-poison + if wasOpen { + logrus.Error("ERROR: The channel was open when it wasn't suppoed to be") + } + + return killed +} + +func (c *Client) termsizeLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "termsizeLoop" + + ch := make(chan os.Signal, 1) + notifySignalSIGWINCH(ch) + defer resetSignalSIGWINCH() + + for { + if b, err := syscallTIOCGWINSZ(); err != nil { + logrus.Warn(err) + } else { + if err = c.write(append([]byte("2"), b...)); err != nil { + logrus.Warnf("ws.WriteMessage failed: %v", err) + } + } + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + case <-ch: + } + } +} + +type exposeFd interface { + Fd() uintptr +} + +func (c *Client) writeLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "writeLoop" + + buff := make([]byte, 128) + oldState, err := terminal.MakeRaw(0) + if err == nil { + defer terminal.Restore(0, oldState) + } + + rdfs := &goselect.FDSet{} + reader := io.Reader(os.Stdin) + for { + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + default: + } + + rdfs.Zero() + rdfs.Set(reader.(exposeFd).Fd()) + err := goselect.Select(1, rdfs, nil, nil, 50*time.Millisecond) + if err != nil { + return openPoison(fname, c.poison) + } + if rdfs.IsSet(reader.(exposeFd).Fd()) { + size, err := reader.Read(buff) + + if err != nil { + if err == io.EOF { + // Send EOF to GoTTY + + // Send 'Input' marker, as defined in GoTTY::client_context.go, + // followed by EOT (a translation of Ctrl-D for terminals) + err = c.write(append([]byte("0"), byte(4))) + + if err != nil { + return openPoison(fname, c.poison) + } + continue + } else { + return openPoison(fname, c.poison) + } + } + + if size <= 0 { + continue + } + + data := buff[:size] + err = c.write(append([]byte("0"), data...)) + if err != nil { + return openPoison(fname, c.poison) + } + } + } +} + +func (c *Client) readLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "readLoop" + + type MessageNonBlocking struct { + Data []byte + Err error + } + msgChan := make(chan MessageNonBlocking) + + for { + go func() { + _, data, err := c.Conn.ReadMessage() + msgChan <- MessageNonBlocking{Data: data, Err: err} + }() + + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + case msg := <-msgChan: + if msg.Err != nil { + + if _, ok := msg.Err.(*websocket.CloseError); !ok { + logrus.Warnf("c.Conn.ReadMessage: %v", msg.Err) + } + return openPoison(fname, c.poison) + } + if len(msg.Data) == 0 { + + logrus.Warnf("An error has occured") + return openPoison(fname, c.poison) + } + switch msg.Data[0] { + case '0': // data + buf, err := base64.StdEncoding.DecodeString(string(msg.Data[1:])) + if err != nil { + logrus.Warnf("Invalid base64 content: %q", msg.Data[1:]) + break + } + c.Output.Write(buf) + case '1': // pong + case '2': // new title + newTitle := string(msg.Data[1:]) + fmt.Fprintf(c.Output, "\033]0;%s\007", newTitle) + case '3': // json prefs + logrus.Debugf("Unhandled protocol message: json pref: %s", string(msg.Data[1:])) + case '4': // autoreconnect + logrus.Debugf("Unhandled protocol message: autoreconnect: %s", string(msg.Data)) + 
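+			// The one-byte prefixes handled above come from the GoTTY server:
+			// '0' carries base64-encoded terminal output, '1' is a pong, '2' a
+			// window-title update; '3' (JSON preferences) and '4' (autoreconnect)
+			// are logged but otherwise ignored.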
default: + logrus.Warnf("Unhandled protocol message: %s", string(msg.Data)) + } + } + } +} + +// SetOutput changes the output stream +func (c *Client) SetOutput(w io.Writer) { + c.Output = w +} + +// ParseURL parses an URL which may be incomplete and tries to standardize it +func ParseURL(input string) (string, error) { + parsed, err := url.Parse(input) + if err != nil { + return "", err + } + switch parsed.Scheme { + case "http", "https": + // everything is ok + default: + return ParseURL(fmt.Sprintf("http://%s", input)) + } + return parsed.String(), nil +} + +// NewClient returns a GoTTY client object +func NewClient(inputURL string) (*Client, error) { + url, err := ParseURL(inputURL) + if err != nil { + return nil, err + } + return &Client{ + Dialer: &websocket.Dialer{}, + URL: url, + WriteMutex: &sync.Mutex{}, + Output: os.Stdout, + poison: make(chan bool), + }, nil +} diff --git a/vendor/github.com/renstrom/fuzzysearch/LICENSE b/vendor/github.com/renstrom/fuzzysearch/LICENSE new file mode 100644 index 000000000..9cc753370 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Renström + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go new file mode 100644 index 000000000..63277d51e --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go @@ -0,0 +1,167 @@ +// Fuzzy searching allows for flexibly matching a string with partial input, +// useful for filtering data very quickly based on lightweight user input. +package fuzzy + +import ( + "unicode" + "unicode/utf8" +) + +var noop = func(r rune) rune { return r } + +// Match returns true if source matches target using a fuzzy-searching +// algorithm. Note that it doesn't implement Levenshtein distance (see +// RankMatch instead), but rather a simplified version where there's no +// approximation. The method will return true only if each character in the +// source can be found in the target and occurs after the preceding matches. +func Match(source, target string) bool { + return match(source, target, noop) +} + +// MatchFold is a case-insensitive version of Match. 
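+// For example, MatchFold("sfo", "San Francisco") returns true, since 's', 'f'
+// and 'o' occur in that order regardless of case, whereas MatchFold("ofs",
+// "San Francisco") returns false.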
+func MatchFold(source, target string) bool { + return match(source, target, unicode.ToLower) +} + +func match(source, target string, fn func(rune) rune) bool { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return false + } + + if lenDiff == 0 && source == target { + return true + } + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } + } + return false + } + + return true +} + +// Find will return a list of strings in targets that fuzzy matches source. +func Find(source string, targets []string) []string { + return find(source, targets, noop) +} + +// FindFold is a case-insensitive version of Find. +func FindFold(source string, targets []string) []string { + return find(source, targets, unicode.ToLower) +} + +func find(source string, targets []string, fn func(rune) rune) []string { + var matches []string + + for _, target := range targets { + if match(source, target, fn) { + matches = append(matches, target) + } + } + + return matches +} + +// RankMatch is similar to Match except it will measure the Levenshtein +// distance between the source and the target and return its result. If there +// was no match, it will return -1. +// Given the requirements of match, RankMatch only needs to perform a subset of +// the Levenshtein calculation, only deletions need be considered, required +// additions and substitutions would fail the match test. +func RankMatch(source, target string) int { + return rank(source, target, noop) +} + +// RankMatchFold is a case-insensitive version of RankMatch. +func RankMatchFold(source, target string) int { + return rank(source, target, unicode.ToLower) +} + +func rank(source, target string, fn func(rune) rune) int { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return -1 + } + + if lenDiff == 0 && source == target { + return 0 + } + + runeDiff := 0 + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } else { + runeDiff++ + } + } + return -1 + } + + // Count up remaining char + for len(target) > 0 { + target = target[utf8.RuneLen(rune(target[0])):] + runeDiff++ + } + + return runeDiff +} + +// RankFind is similar to Find, except it will also rank all matches using +// Levenshtein distance. +func RankFind(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, noop) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +// RankFindFold is a case-insensitive version of RankFind. +func RankFindFold(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, unicode.ToLower) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +type Rank struct { + // Source is used as the source for matching. + Source string + + // Target is the word matched against. + Target string + + // Distance is the Levenshtein distance between Source and Target. 
+ Distance int +} + +type Ranks []Rank + +func (r Ranks) Len() int { + return len(r) +} + +func (r Ranks) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r Ranks) Less(i, j int) bool { + return r[i].Distance < r[j].Distance +} diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go new file mode 100644 index 000000000..237923d34 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go @@ -0,0 +1,43 @@ +package fuzzy + +// LevenshteinDistance measures the difference between two strings. +// The Levenshtein distance between two words is the minimum number of +// single-character edits (i.e. insertions, deletions or substitutions) +// required to change one word into the other. +// +// This implemention is optimized to use O(min(m,n)) space and is based on the +// optimized C version found here: +// http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#C +func LevenshteinDistance(s, t string) int { + r1, r2 := []rune(s), []rune(t) + column := make([]int, len(r1)+1) + + for y := 1; y <= len(r1); y++ { + column[y] = y + } + + for x := 1; x <= len(r2); x++ { + column[0] = x + + for y, lastDiag := 1, x-1; y <= len(r1); y++ { + oldDiag := column[y] + cost := 0 + if r1[y-1] != r2[x-1] { + cost = 1 + } + column[y] = min(column[y]+1, column[y-1]+1, lastDiag+cost) + lastDiag = oldDiag + } + } + + return column[len(r1)] +} + +func min(a, b, c int) int { + if a < b && a < c { + return a + } else if b < c { + return b + } + return c +} diff --git a/vendor/github.com/scaleway/scaleway-cli/LICENSE.md b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md new file mode 100644 index 000000000..7503a16ca --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License +=============== + +Copyright (c) **2014-2016 Scaleway ([@scaleway](https://twitter.com/scaleway))** + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
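Before moving on to the Scaleway API client, here is a minimal sketch of how the renstrom/fuzzysearch fuzzy package vendored above can be exercised. The import path, the functions and the Ranks sort behaviour come straight from the files shown; the sample strings are made up for illustration only.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/renstrom/fuzzysearch/fuzzy"
)

func main() {
	fmt.Println(fuzzy.Match("pkr", "packer"))                   // true: 'p', 'k', 'r' occur in order
	fmt.Println(fuzzy.RankMatch("pkr", "packer"))               // 3: three runes of "packer" are skipped
	fmt.Println(fuzzy.LevenshteinDistance("kitten", "sitting")) // 3

	// RankFind returns fuzzy.Ranks, which implements sort.Interface,
	// so results can be ordered by Levenshtein distance.
	matches := fuzzy.RankFind("scw", []string{"scaleway", "screen", "discworld"})
	sort.Sort(matches)
	for _, m := range matches {
		fmt.Printf("%s -> %s (distance %d)\n", m.Source, m.Target, m.Distance)
	}
}
```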
diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md new file mode 100644 index 000000000..559a7018d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md @@ -0,0 +1,25 @@ +# Scaleway's API + +[![GoDoc](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api?status.svg)](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api) + +This package contains facilities to play with the Scaleway API, it includes the following features: + +- dedicated configuration file containing credentials to deal with the API +- caching to resolve UUIDs without contacting the API + +## Links + +- [API documentation](https://developer.scaleway.com) +- [Official Python SDK](https://github.com/scaleway/python-scaleway) +- Projects using this SDK + - https://github.com/scaleway/devhub + - https://github.com/scaleway/docker-machine-driver-scaleway + - https://github.com/scaleway-community/scaleway-ubuntu-coreos/blob/master/overlay/usr/local/update-firewall/scw-api/cache.go + - https://github.com/pulcy/quark + - https://github.com/hex-sh/terraform-provider-scaleway + - https://github.com/tscolari/bosh-scaleway-cpi +- Other **golang** clients + - https://github.com/lalyos/onlabs + - https://github.com/meatballhat/packer-builder-onlinelabs + - https://github.com/nlamirault/go-scaleway + - https://github.com/golang/build/blob/master/cmd/scaleway/scaleway.go diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go new file mode 100644 index 000000000..5ce0157dc --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go @@ -0,0 +1,2754 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +// Interact with Scaleway API + +// Package api contains client and functions to interact with Scaleway API +package api + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "golang.org/x/sync/errgroup" +) + +// Default values +var ( + AccountAPI = "https://account.scaleway.com/" + MetadataAPI = "http://169.254.42.42/" + MarketplaceAPI = "https://api-marketplace.scaleway.com" + ComputeAPIPar1 = "https://cp-par1.scaleway.com/" + ComputeAPIAms1 = "https://cp-ams1.scaleway.com" + + URLPublicDNS = ".pub.cloud.scaleway.com" + URLPrivateDNS = ".priv.cloud.scaleway.com" +) + +func init() { + if url := os.Getenv("SCW_ACCOUNT_API"); url != "" { + AccountAPI = url + } + if url := os.Getenv("SCW_METADATA_API"); url != "" { + MetadataAPI = url + } + if url := os.Getenv("SCW_MARKETPLACE_API"); url != "" { + MarketplaceAPI = url + } +} + +const ( + perPage = 50 +) + +// ScalewayAPI is the interface used to communicate with the Scaleway API +type ScalewayAPI struct { + // Organization is the identifier of the Scaleway organization + Organization string + + // Token is the authentication token for the Scaleway organization + Token string + + // Password is the authentication password + password string + + userAgent string + + // Cache is used to quickly resolve identifiers from names + Cache *ScalewayCache + + client *http.Client + verbose bool + computeAPI string + + Region string + // + Logger +} + +// ScalewayAPIError represents a Scaleway API Error +type ScalewayAPIError struct { + // Message is a human-friendly error message + APIMessage string `json:"message,omitempty"` + + // Type is a string code that defines the kind of error + Type string `json:"type,omitempty"` + + // Fields contains detail about validation error + Fields map[string][]string `json:"fields,omitempty"` + + // StatusCode is the HTTP status code received + StatusCode int `json:"-"` + + // Message + Message string `json:"-"` +} + +// Error returns a string representing the error +func (e ScalewayAPIError) Error() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "StatusCode: %v, ", e.StatusCode) + fmt.Fprintf(&b, "Type: %v, ", e.Type) + fmt.Fprintf(&b, "APIMessage: \x1b[31m%v\x1b[0m", e.APIMessage) + if len(e.Fields) > 0 { + fmt.Fprintf(&b, ", Details: %v", e.Fields) + } + return b.String() +} + +// HideAPICredentials removes API credentials from a string +func (s *ScalewayAPI) HideAPICredentials(input string) string { + output := input + if s.Token != "" { + output = strings.Replace(output, s.Token, "00000000-0000-4000-8000-000000000000", -1) + } + if s.Organization != "" { + output = strings.Replace(output, s.Organization, "00000000-0000-5000-9000-000000000000", -1) + } + if s.password != "" { + output = strings.Replace(output, s.password, "XX-XX-XX-XX", -1) + } + return output +} + +// ScalewayIPAddress represents a Scaleway IP address +type ScalewayIPAddress struct { + // Identifier is a unique identifier for the IP address + Identifier string `json:"id,omitempty"` + + // IP is an IPv4 address + IP string `json:"address,omitempty"` + + // Dynamic is a flag that defines an IP that change on each reboot + Dynamic *bool `json:"dynamic,omitempty"` +} + +// ScalewayVolume represents a Scaleway Volume +type ScalewayVolume struct { + // Identifier is a unique identifier for the volume + Identifier string `json:"id,omitempty"` + + // Size is the 
allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // CreationDate is the creation date of the volume + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the volume + ModificationDate string `json:"modification_date,omitempty"` + + // Organization is the organization owning the volume + Organization string `json:"organization,omitempty"` + + // Name is the name of the volume + Name string `json:"name,omitempty"` + + // Server is the server using this image + Server *struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server,omitempty"` + + // VolumeType is a Scaleway identifier for the kind of volume (default: l_ssd) + VolumeType string `json:"volume_type,omitempty"` + + // ExportURI represents the url used by initrd/scripts to attach the volume + ExportURI string `json:"export_uri,omitempty"` +} + +// ScalewayOneVolume represents the response of a GET /volumes/UUID API call +type ScalewayOneVolume struct { + Volume ScalewayVolume `json:"volume,omitempty"` +} + +// ScalewayVolumes represents a group of Scaleway volumes +type ScalewayVolumes struct { + // Volumes holds scaleway volumes of the response + Volumes []ScalewayVolume `json:"volumes,omitempty"` +} + +// ScalewayVolumeDefinition represents a Scaleway volume definition +type ScalewayVolumeDefinition struct { + // Name is the user-defined name of the volume + Name string `json:"name"` + + // Image is the image used by the volume + Size uint64 `json:"size"` + + // Bootscript is the bootscript used by the volume + Type string `json:"volume_type"` + + // Organization is the owner of the volume + Organization string `json:"organization"` +} + +// ScalewayVolumePutDefinition represents a Scaleway volume with nullable fields (for PUT) +type ScalewayVolumePutDefinition struct { + Identifier *string `json:"id,omitempty"` + Size *uint64 `json:"size,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Organization *string `json:"organization,omitempty"` + Name *string `json:"name,omitempty"` + Server struct { + Identifier *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } `json:"server,omitempty"` + VolumeType *string `json:"volume_type,omitempty"` + ExportURI *string `json:"export_uri,omitempty"` +} + +// ScalewayImage represents a Scaleway Image +type ScalewayImage struct { + // Identifier is a unique identifier for the image + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the image + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the image + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the image + ModificationDate string `json:"modification_date,omitempty"` + + // RootVolume is the root volume bound to the image + RootVolume ScalewayVolume `json:"root_volume,omitempty"` + + // Public is true for public images and false for user images + Public bool `json:"public,omitempty"` + + // Bootscript is the bootscript bound to the image + DefaultBootscript *ScalewayBootscript `json:"default_bootscript,omitempty"` + + // Organization is the owner of the image + Organization string `json:"organization,omitempty"` + + // Arch is the architecture target of the image + Arch string `json:"arch,omitempty"` + + // FIXME: extra_volumes +} + +// ScalewayImageIdentifier represents a Scaleway 
Image Identifier +type ScalewayImageIdentifier struct { + Identifier string + Arch string + Region string + Owner string +} + +// ScalewayOneImage represents the response of a GET /images/UUID API call +type ScalewayOneImage struct { + Image ScalewayImage `json:"image,omitempty"` +} + +// ScalewayImages represents a group of Scaleway images +type ScalewayImages struct { + // Images holds scaleway images of the response + Images []ScalewayImage `json:"images,omitempty"` +} + +// ScalewaySnapshot represents a Scaleway Snapshot +type ScalewaySnapshot struct { + // Identifier is a unique identifier for the snapshot + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the snapshot + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the snapshot + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the snapshot + ModificationDate string `json:"modification_date,omitempty"` + + // Size is the allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // Organization is the owner of the snapshot + Organization string `json:"organization"` + + // State is the current state of the snapshot + State string `json:"state"` + + // VolumeType is the kind of volume behind the snapshot + VolumeType string `json:"volume_type"` + + // BaseVolume is the volume from which the snapshot inherits + BaseVolume ScalewayVolume `json:"base_volume,omitempty"` +} + +// ScalewayOneSnapshot represents the response of a GET /snapshots/UUID API call +type ScalewayOneSnapshot struct { + Snapshot ScalewaySnapshot `json:"snapshot,omitempty"` +} + +// ScalewaySnapshots represents a group of Scaleway snapshots +type ScalewaySnapshots struct { + // Snapshots holds scaleway snapshots of the response + Snapshots []ScalewaySnapshot `json:"snapshots,omitempty"` +} + +// ScalewayBootscript represents a Scaleway Bootscript +type ScalewayBootscript struct { + Bootcmdargs string `json:"bootcmdargs,omitempty"` + Dtb string `json:"dtb,omitempty"` + Initrd string `json:"initrd,omitempty"` + Kernel string `json:"kernel,omitempty"` + + // Arch is the architecture target of the bootscript + Arch string `json:"architecture,omitempty"` + + // Identifier is a unique identifier for the bootscript + Identifier string `json:"id,omitempty"` + + // Organization is the owner of the bootscript + Organization string `json:"organization,omitempty"` + + // Name is a user-defined name for the bootscript + Title string `json:"title,omitempty"` + + // Public is true for public bootscripts and false for user bootscripts + Public bool `json:"public,omitempty"` + + Default bool `json:"default,omitempty"` +} + +// ScalewayOneBootscript represents the response of a GET /bootscripts/UUID API call +type ScalewayOneBootscript struct { + Bootscript ScalewayBootscript `json:"bootscript,omitempty"` +} + +// ScalewayBootscripts represents a group of Scaleway bootscripts +type ScalewayBootscripts struct { + // Bootscripts holds Scaleway bootscripts of the response + Bootscripts []ScalewayBootscript `json:"bootscripts,omitempty"` +} + +// ScalewayTask represents a Scaleway Task +type ScalewayTask struct { + // Identifier is a unique identifier for the task + Identifier string `json:"id,omitempty"` + + // StartDate is the start date of the task + StartDate string `json:"started_at,omitempty"` + + // TerminationDate is the termination date of the task + TerminationDate string `json:"terminated_at,omitempty"` + + HrefFrom string 
`json:"href_from,omitempty"` + + Description string `json:"description,omitempty"` + + Status string `json:"status,omitempty"` + + Progress int `json:"progress,omitempty"` +} + +// ScalewayOneTask represents the response of a GET /tasks/UUID API call +type ScalewayOneTask struct { + Task ScalewayTask `json:"task,omitempty"` +} + +// ScalewayTasks represents a group of Scaleway tasks +type ScalewayTasks struct { + // Tasks holds scaleway tasks of the response + Tasks []ScalewayTask `json:"tasks,omitempty"` +} + +// ScalewaySecurityGroupRule definition +type ScalewaySecurityGroupRule struct { + Direction string `json:"direction"` + Protocol string `json:"protocol"` + IPRange string `json:"ip_range"` + DestPortFrom int `json:"dest_port_from,omitempty"` + Action string `json:"action"` + Position int `json:"position"` + DestPortTo string `json:"dest_port_to"` + Editable bool `json:"editable"` + ID string `json:"id"` +} + +// ScalewayGetSecurityGroupRules represents the response of a GET /security_group/{groupID}/rules +type ScalewayGetSecurityGroupRules struct { + Rules []ScalewaySecurityGroupRule `json:"rules"` +} + +// ScalewayGetSecurityGroupRule represents the response of a GET /security_group/{groupID}/rules/{ruleID} +type ScalewayGetSecurityGroupRule struct { + Rules ScalewaySecurityGroupRule `json:"rule"` +} + +// ScalewayNewSecurityGroupRule definition POST/PUT request /security_group/{groupID} +type ScalewayNewSecurityGroupRule struct { + Action string `json:"action"` + Direction string `json:"direction"` + IPRange string `json:"ip_range"` + Protocol string `json:"protocol"` + DestPortFrom int `json:"dest_port_from,omitempty"` +} + +// ScalewaySecurityGroups definition +type ScalewaySecurityGroups struct { + Description string `json:"description"` + ID string `json:"id"` + Organization string `json:"organization"` + Name string `json:"name"` + Servers []ScalewaySecurityGroup `json:"servers"` + EnableDefaultSecurity bool `json:"enable_default_security"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayGetSecurityGroups represents the response of a GET /security_groups/ +type ScalewayGetSecurityGroups struct { + SecurityGroups []ScalewaySecurityGroups `json:"security_groups"` +} + +// ScalewayGetSecurityGroup represents the response of a GET /security_groups/{groupID} +type ScalewayGetSecurityGroup struct { + SecurityGroups ScalewaySecurityGroups `json:"security_group"` +} + +// ScalewayIPDefinition represents the IP's fields +type ScalewayIPDefinition struct { + Organization string `json:"organization"` + Reverse *string `json:"reverse"` + ID string `json:"id"` + Server *struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server"` + Address string `json:"address"` +} + +// ScalewayGetIPS represents the response of a GET /ips/ +type ScalewayGetIPS struct { + IPS []ScalewayIPDefinition `json:"ips"` +} + +// ScalewayGetIP represents the response of a GET /ips/{id_ip} +type ScalewayGetIP struct { + IP ScalewayIPDefinition `json:"ip"` +} + +// ScalewaySecurityGroup represents a Scaleway security group +type ScalewaySecurityGroup struct { + // Identifier is a unique identifier for the security group + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the security group + Name string `json:"name,omitempty"` +} + +// ScalewayNewSecurityGroup definition POST request /security_groups +type ScalewayNewSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + 
Description string `json:"description"` +} + +// ScalewayUpdateSecurityGroup definition PUT request /security_groups +type ScalewayUpdateSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayServer represents a Scaleway server +type ScalewayServer struct { + // Arch is the architecture target of the server + Arch string `json:"arch,omitempty"` + + // Identifier is a unique identifier for the server + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the server + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the server + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the server + ModificationDate string `json:"modification_date,omitempty"` + + // Image is the image used by the server + Image ScalewayImage `json:"image,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // PublicIP is the public IP address bound to the server + PublicAddress ScalewayIPAddress `json:"public_ip,omitempty"` + + // State is the current status of the server + State string `json:"state,omitempty"` + + // StateDetail is the detailed status of the server + StateDetail string `json:"state_detail,omitempty"` + + // PrivateIP represents the private IPV4 attached to the server (changes on each boot) + PrivateIP string `json:"private_ip,omitempty"` + + // Bootscript is the unique identifier of the selected bootscript + Bootscript *ScalewayBootscript `json:"bootscript,omitempty"` + + // Hostname represents the ServerName in a format compatible with unix's hostname + Hostname string `json:"hostname,omitempty"` + + // Tags represents user-defined tags + Tags []string `json:"tags,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]ScalewayVolume `json:"volumes,omitempty"` + + // SecurityGroup is the selected security group object + SecurityGroup ScalewaySecurityGroup `json:"security_group,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization,omitempty"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type,omitempty"` + + // Location of the server + Location struct { + Platform string `json:"platform_id,omitempty"` + Chassis string `json:"chassis_id,omitempty"` + Cluster string `json:"cluster_id,omitempty"` + Hypervisor string `json:"hypervisor_id,omitempty"` + Blade string `json:"blade_id,omitempty"` + Node string `json:"node_id,omitempty"` + ZoneID string `json:"zone_id,omitempty"` + } `json:"location,omitempty"` + + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` + + // This fields are not returned by the API, we generate it + DNSPublic string `json:"dns_public,omitempty"` + DNSPrivate string `json:"dns_private,omitempty"` +} + +// ScalewayIPV6Definition represents a Scaleway ipv6 +type ScalewayIPV6Definition struct { + Netmask string `json:"netmask"` + Gateway string `json:"gateway"` + Address string `json:"address"` +} + +// ScalewayServerPatchDefinition represents a Scaleway server with nullable fields (for PATCH) +type ScalewayServerPatchDefinition struct { + Arch *string `json:"arch,omitempty"` + Name *string 
`json:"name,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Image *ScalewayImage `json:"image,omitempty"` + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + PublicAddress *ScalewayIPAddress `json:"public_ip,omitempty"` + State *string `json:"state,omitempty"` + StateDetail *string `json:"state_detail,omitempty"` + PrivateIP *string `json:"private_ip,omitempty"` + Bootscript *string `json:"bootscript,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Volumes *map[string]ScalewayVolume `json:"volumes,omitempty"` + SecurityGroup *ScalewaySecurityGroup `json:"security_group,omitempty"` + Organization *string `json:"organization,omitempty"` + Tags *[]string `json:"tags,omitempty"` + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + EnableIPV6 *bool `json:"enable_ipv6,omitempty"` +} + +// ScalewayServerDefinition represents a Scaleway server with image definition +type ScalewayServerDefinition struct { + // Name is the user-defined name of the server + Name string `json:"name"` + + // Image is the image used by the server + Image *string `json:"image,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]string `json:"volumes,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // Bootscript is the bootscript used by the server + Bootscript *string `json:"bootscript"` + + // Tags are the metadata tags attached to the server + Tags []string `json:"tags,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type"` + + PublicIP string `json:"public_ip,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` + + SecurityGroup string `json:"security_group,omitempty"` +} + +// ScalewayOneServer represents the response of a GET /servers/UUID API call +type ScalewayOneServer struct { + Server ScalewayServer `json:"server,omitempty"` +} + +// ScalewayServers represents a group of Scaleway servers +type ScalewayServers struct { + // Servers holds scaleway servers of the response + Servers []ScalewayServer `json:"servers,omitempty"` +} + +// ScalewayServerAction represents an action to perform on a Scaleway server +type ScalewayServerAction struct { + // Action is the name of the action to trigger + Action string `json:"action,omitempty"` +} + +// ScalewaySnapshotDefinition represents a Scaleway snapshot definition +type ScalewaySnapshotDefinition struct { + VolumeIDentifier string `json:"volume_id"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` +} + +// ScalewayImageDefinition represents a Scaleway image definition +type ScalewayImageDefinition struct { + SnapshotIDentifier string `json:"root_volume"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` + Arch string `json:"arch"` + DefaultBootscript *string `json:"default_bootscript,omitempty"` +} + +// ScalewayRoleDefinition represents a Scaleway Token UserId Role +type ScalewayRoleDefinition struct { + Organization ScalewayOrganizationDefinition `json:"organization,omitempty"` + Role string `json:"role,omitempty"` +} + +// ScalewayTokenDefinition represents a Scaleway Token +type ScalewayTokenDefinition struct { + UserID string `json:"user_id"` + Description 
string `json:"description,omitempty"` + Roles ScalewayRoleDefinition `json:"roles"` + Expires string `json:"expires"` + InheritsUsersPerms bool `json:"inherits_user_perms"` + ID string `json:"id"` +} + +// ScalewayTokensDefinition represents a Scaleway Tokens +type ScalewayTokensDefinition struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayGetTokens represents a list of Scaleway Tokens +type ScalewayGetTokens struct { + Tokens []ScalewayTokenDefinition `json:"tokens"` +} + +// ScalewayContainerData represents a Scaleway container data (S3) +type ScalewayContainerData struct { + LastModified string `json:"last_modified"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainerDatas represents a list of Scaleway containers data (S3) +type ScalewayGetContainerDatas struct { + Container []ScalewayContainerData `json:"container"` +} + +// ScalewayContainer represents a Scaleway container (S3) +type ScalewayContainer struct { + ScalewayOrganizationDefinition `json:"organization"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainers represents a list of Scaleway containers (S3) +type ScalewayGetContainers struct { + Containers []ScalewayContainer `json:"containers"` +} + +// ScalewayConnectResponse represents the answer from POST /tokens +type ScalewayConnectResponse struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayConnect represents the data to connect +type ScalewayConnect struct { + Email string `json:"email"` + Password string `json:"password"` + Description string `json:"description"` + Expires bool `json:"expires"` +} + +// ScalewayOrganizationDefinition represents a Scaleway Organization +type ScalewayOrganizationDefinition struct { + ID string `json:"id"` + Name string `json:"name"` + Users []ScalewayUserDefinition `json:"users"` +} + +// ScalewayOrganizationsDefinition represents a Scaleway Organizations +type ScalewayOrganizationsDefinition struct { + Organizations []ScalewayOrganizationDefinition `json:"organizations"` +} + +// ScalewayUserDefinition represents a Scaleway User +type ScalewayUserDefinition struct { + Email string `json:"email"` + Firstname string `json:"firstname"` + Fullname string `json:"fullname"` + ID string `json:"id"` + Lastname string `json:"lastname"` + Organizations []ScalewayOrganizationDefinition `json:"organizations"` + Roles []ScalewayRoleDefinition `json:"roles"` + SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayUsersDefinition represents the response of a GET /user +type ScalewayUsersDefinition struct { + User ScalewayUserDefinition `json:"user"` +} + +// ScalewayKeyDefinition represents a key +type ScalewayKeyDefinition struct { + Key string `json:"key"` + Fingerprint string `json:"fingerprint,omitempty"` +} + +// ScalewayUserPatchSSHKeyDefinition represents a User Patch +type ScalewayUserPatchSSHKeyDefinition struct { + SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayDashboardResp represents a dashboard received from the API +type ScalewayDashboardResp struct { + Dashboard ScalewayDashboard +} + +// ScalewayDashboard represents a dashboard +type ScalewayDashboard struct { + VolumesCount int `json:"volumes_count"` + RunningServersCount int `json:"running_servers_count"` + ImagesCount int `json:"images_count"` + SnapshotsCount int `json:"snapshots_count"` + ServersCount int `json:"servers_count"` + IPsCount int `json:"ips_count"` +} + +// ScalewayPermissions represents the response of 
GET /permissions +type ScalewayPermissions map[string]ScalewayPermCategory + +// ScalewayPermCategory represents ScalewayPermissions's fields +type ScalewayPermCategory map[string][]string + +// ScalewayPermissionDefinition represents the permissions +type ScalewayPermissionDefinition struct { + Permissions ScalewayPermissions `json:"permissions"` +} + +// ScalewayUserdatas represents the response of a GET /user_data +type ScalewayUserdatas struct { + UserData []string `json:"user_data"` +} + +// ScalewayQuota represents a map of quota (name, value) +type ScalewayQuota map[string]int + +// ScalewayGetQuotas represents the response of GET /organizations/{orga_id}/quotas +type ScalewayGetQuotas struct { + Quotas ScalewayQuota `json:"quotas"` +} + +// ScalewayUserdata represents []byte +type ScalewayUserdata []byte + +// FuncMap used for json inspection +var FuncMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// MarketLocalImageDefinition represents localImage of marketplace version +type MarketLocalImageDefinition struct { + Arch string `json:"arch"` + ID string `json:"id"` + Zone string `json:"zone"` +} + +// MarketLocalImages represents an array of local images +type MarketLocalImages struct { + LocalImages []MarketLocalImageDefinition `json:"local_images"` +} + +// MarketLocalImage represents local image +type MarketLocalImage struct { + LocalImages MarketLocalImageDefinition `json:"local_image"` +} + +// MarketVersionDefinition represents version of marketplace image +type MarketVersionDefinition struct { + CreationDate string `json:"creation_date"` + ID string `json:"id"` + Image struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"image"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + MarketLocalImages +} + +// MarketVersions represents an array of marketplace image versions +type MarketVersions struct { + Versions []MarketVersionDefinition `json:"versions"` +} + +// MarketVersion represents version of marketplace image +type MarketVersion struct { + Version MarketVersionDefinition `json:"version"` +} + +// MarketImage represents MarketPlace image +type MarketImage struct { + Categories []string `json:"categories"` + CreationDate string `json:"creation_date"` + CurrentPublicVersion string `json:"current_public_version"` + Description string `json:"description"` + ID string `json:"id"` + Logo string `json:"logo"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + Organization struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"organization"` + Public bool `json:"-"` + MarketVersions +} + +// MarketImages represents MarketPlace images +type MarketImages struct { + Images []MarketImage `json:"images"` +} + +// NewScalewayAPI creates a ready-to-use ScalewayAPI client +func NewScalewayAPI(organization, token, userAgent, region string, options ...func(*ScalewayAPI)) (*ScalewayAPI, error) { + s := &ScalewayAPI{ + // exposed + Organization: organization, + Token: token, + Logger: NewDefaultLogger(), + + // internal + client: &http.Client{}, + verbose: os.Getenv("SCW_VERBOSE_API") != "", + password: "", + userAgent: userAgent, + } + for _, option := range options { + option(s) + } + cache, err := NewScalewayCache(func() { s.Logger.Debugf("Writing cache file to disk") }) + if err != nil { + return nil, err + } + s.Cache = cache + if os.Getenv("SCW_TLSVERIFY") == "0" { + s.client.Transport = &http.Transport{ + 
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + } + switch region { + case "par1", "": + s.computeAPI = ComputeAPIPar1 + case "ams1": + s.computeAPI = ComputeAPIAms1 + default: + return nil, fmt.Errorf("%s isn't a valid region", region) + } + s.Region = region + if url := os.Getenv("SCW_COMPUTE_API"); url != "" { + s.computeAPI = url + } + return s, nil +} + +// ClearCache clears the cache +func (s *ScalewayAPI) ClearCache() { + s.Cache.Clear() +} + +// Sync flushes out the cache to the disk +func (s *ScalewayAPI) Sync() { + s.Cache.Save() +} + +func (s *ScalewayAPI) response(method, uri string, content io.Reader) (resp *http.Response, err error) { + var ( + req *http.Request + ) + + req, err = http.NewRequest(method, uri, content) + if err != nil { + err = fmt.Errorf("response %s %s", method, uri) + return + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + s.LogHTTP(req) + if s.verbose { + dump, _ := httputil.DumpRequest(req, true) + s.Debugf("%v", string(dump)) + } else { + s.Debugf("[%s]: %v", method, uri) + } + resp, err = s.client.Do(req) + return +} + +// GetResponsePaginate fetchs all resources and returns an http.Response object for the requested resource +func (s *ScalewayAPI) GetResponsePaginate(apiURL, resource string, values url.Values) (*http.Response, error) { + resp, err := s.response("HEAD", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + if err != nil { + return nil, err + } + + count := resp.Header.Get("X-Total-Count") + var maxElem int + if count == "" { + maxElem = 0 + } else { + maxElem, err = strconv.Atoi(count) + if err != nil { + return nil, err + } + } + + get := maxElem / perPage + if (float32(maxElem) / perPage) > float32(get) { + get++ + } + + if get <= 1 { // If there is 0 or 1 page of result, the response is not paginated + if len(values) == 0 { + return s.response("GET", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil) + } + return s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + } + + fetchAll := !(values.Get("per_page") != "" || values.Get("page") != "") + if fetchAll { + var g errgroup.Group + + ch := make(chan *http.Response, get) + for i := 1; i <= get; i++ { + i := i // closure tricks + g.Go(func() (err error) { + var resp *http.Response + + val := url.Values{} + val.Set("per_page", fmt.Sprintf("%v", perPage)) + val.Set("page", fmt.Sprintf("%v", i)) + resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, val.Encode()), nil) + ch <- resp + return + }) + } + if err = g.Wait(); err != nil { + return nil, err + } + newBody := make(map[string][]json.RawMessage) + body := make(map[string][]json.RawMessage) + key := "" + for i := 0; i < get; i++ { + res := <-ch + if res.StatusCode != http.StatusOK { + return res, nil + } + content, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return nil, err + } + if err := json.Unmarshal(content, &body); err != nil { + return nil, err + } + + if i == 0 { + resp = res + for k := range body { + key = k + break + } + } + newBody[key] = append(newBody[key], body[key]...) 
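+			// Every page carries the same single top-level key (e.g. "servers"),
+			// so the pages are simply concatenated under that key and re-encoded
+			// as one response body below.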
+ } + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(newBody); err != nil { + return nil, err + } + resp.Body = ioutil.NopCloser(payload) + } else { + resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + } + return resp, err +} + +// PostResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("POST", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// PatchResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("PATCH", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// PutResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PutResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("PUT", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// DeleteResponse returns an http.Response object for the deleted resource +func (s *ScalewayAPI) DeleteResponse(apiURL, resource string) (*http.Response, error) { + return s.response("DELETE", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil) +} + +// handleHTTPError checks the statusCode and displays the error +func (s *ScalewayAPI) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if s.verbose { + resp.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + dump, err := httputil.DumpResponse(resp, true) + if err == nil { + var js bytes.Buffer + + err = json.Indent(&js, body, "", " ") + if err != nil { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, string(dump)) + } else { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, js.String()) + } + } + } else { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, string(body)) + } + + if resp.StatusCode >= http.StatusInternalServerError { + return nil, errors.New(string(body)) + } + good := false + for _, code := range goodStatusCode { + if code == resp.StatusCode { + good = true + } + } + if !good { + var scwError ScalewayAPIError + + if err := json.Unmarshal(body, &scwError); err != nil { + return nil, err + } + scwError.StatusCode = resp.StatusCode + s.Debugf("%s", scwError.Error()) + return nil, scwError + } + return body, nil +} + +func (s *ScalewayAPI) fetchServers(api string, query url.Values, out chan<- ScalewayServers) func() error { + return func() error { + resp, err := s.GetResponsePaginate(api, "servers", query) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return err + } + var servers ScalewayServers + + if err = json.Unmarshal(body, &servers); err != nil { + return err + } + out <- servers + return nil + } +} + +// GetServers gets the list of servers from the ScalewayAPI +func (s *ScalewayAPI) GetServers(all 
bool, limit int) (*[]ScalewayServer, error) { + query := url.Values{} + if !all { + query.Set("state", "running") + } + if limit > 0 { + // FIXME: wait for the API to be ready + // query.Set("per_page", strconv.Itoa(limit)) + panic("Not implemented yet") + } + if all && limit == 0 { + s.Cache.ClearServers() + } + var ( + g errgroup.Group + apis = []string{ + ComputeAPIPar1, + ComputeAPIAms1, + } + ) + + serverChan := make(chan ScalewayServers, 2) + for _, api := range apis { + g.Go(s.fetchServers(api, query, serverChan)) + } + + if err := g.Wait(); err != nil { + return nil, err + } + close(serverChan) + var servers ScalewayServers + + for server := range serverChan { + servers.Servers = append(servers.Servers, server.Servers...) + } + + for i, server := range servers.Servers { + servers.Servers[i].DNSPublic = server.Identifier + URLPublicDNS + servers.Servers[i].DNSPrivate = server.Identifier + URLPrivateDNS + s.Cache.InsertServer(server.Identifier, server.Location.ZoneID, server.Arch, server.Organization, server.Name) + } + return &servers.Servers, nil +} + +// ScalewaySortServers represents a wrapper to sort by CreationDate the servers +type ScalewaySortServers []ScalewayServer + +func (s ScalewaySortServers) Len() int { + return len(s) +} + +func (s ScalewaySortServers) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ScalewaySortServers) Less(i, j int) bool { + date1, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[i].CreationDate) + date2, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[j].CreationDate) + return date2.Before(date1) +} + +// GetServer gets a server from the ScalewayAPI +func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "servers/"+serverID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + + var oneServer ScalewayOneServer + + if err = json.Unmarshal(body, &oneServer); err != nil { + return nil, err + } + // FIXME arch, owner, title + oneServer.Server.DNSPublic = oneServer.Server.Identifier + URLPublicDNS + oneServer.Server.DNSPrivate = oneServer.Server.Identifier + URLPrivateDNS + s.Cache.InsertServer(oneServer.Server.Identifier, oneServer.Server.Location.ZoneID, oneServer.Server.Arch, oneServer.Server.Organization, oneServer.Server.Name) + return &oneServer.Server, nil +} + +// PostServerAction posts an action on a server +func (s *ScalewayAPI) PostServerAction(serverID, action string) error { + data := ScalewayServerAction{ + Action: action, + } + resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("servers/%s/action", serverID), data) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// DeleteServer deletes a server +func (s *ScalewayAPI) DeleteServer(serverID string) error { + defer s.Cache.RemoveServer(serverID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// PostServer creates a new server +func (s *ScalewayAPI) PostServer(definition ScalewayServerDefinition) (string, error) { + definition.Organization = s.Organization + + resp, err := s.PostResponse(s.computeAPI, "servers", definition) + if err != nil { + return "", err + } 
+ defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var server ScalewayOneServer + + if err = json.Unmarshal(body, &server); err != nil { + return "", err + } + // FIXME arch, owner, title + s.Cache.InsertServer(server.Server.Identifier, server.Server.Location.ZoneID, server.Server.Arch, server.Server.Organization, server.Server.Name) + return server.Server.Identifier, nil +} + +// PatchUserSSHKey updates a user +func (s *ScalewayAPI) PatchUserSSHKey(UserID string, definition ScalewayUserPatchSSHKeyDefinition) error { + resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition) + if err != nil { + return err + } + + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil { + return err + } + return nil +} + +// PatchServer updates a server +func (s *ScalewayAPI) PatchServer(serverID string, definition ScalewayServerPatchDefinition) error { + resp, err := s.PatchResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID), definition) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil { + return err + } + return nil +} + +// PostSnapshot creates a new snapshot +func (s *ScalewayAPI) PostSnapshot(volumeID string, name string) (string, error) { + definition := ScalewaySnapshotDefinition{ + VolumeIDentifier: volumeID, + Name: name, + Organization: s.Organization, + } + resp, err := s.PostResponse(s.computeAPI, "snapshots", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var snapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &snapshot); err != nil { + return "", err + } + // FIXME arch, owner, title + s.Cache.InsertSnapshot(snapshot.Snapshot.Identifier, "", "", snapshot.Snapshot.Organization, snapshot.Snapshot.Name) + return snapshot.Snapshot.Identifier, nil +} + +// PostImage creates a new image +func (s *ScalewayAPI) PostImage(volumeID string, name string, bootscript string, arch string) (string, error) { + definition := ScalewayImageDefinition{ + SnapshotIDentifier: volumeID, + Name: name, + Organization: s.Organization, + Arch: arch, + } + if bootscript != "" { + definition.DefaultBootscript = &bootscript + } + + resp, err := s.PostResponse(s.computeAPI, "images", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var image ScalewayOneImage + + if err = json.Unmarshal(body, &image); err != nil { + return "", err + } + // FIXME region, arch, owner, title + s.Cache.InsertImage(image.Image.Identifier, "", image.Image.Arch, image.Image.Organization, image.Image.Name, "") + return image.Image.Identifier, nil +} + +// PostVolume creates a new volume +func (s *ScalewayAPI) PostVolume(definition ScalewayVolumeDefinition) (string, error) { + definition.Organization = s.Organization + if definition.Type == "" { + definition.Type = "l_ssd" + } + + resp, err := s.PostResponse(s.computeAPI, "volumes", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var volume ScalewayOneVolume + + if err = json.Unmarshal(body, &volume); err != nil { + return "", err + } + 
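+	// Unlike PostServer and PostSnapshot, the newly created volume is not
+	// registered in the local cache here (see the FIXME below); only its
+	// identifier is returned.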
// FIXME: s.Cache.InsertVolume(volume.Volume.Identifier, volume.Volume.Name) + return volume.Volume.Identifier, nil +} + +// PutVolume updates a volume +func (s *ScalewayAPI) PutVolume(volumeID string, definition ScalewayVolumePutDefinition) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID), definition) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// ResolveServer attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveServer(needle string) (ScalewayResolverResults, error) { + servers, err := s.Cache.LookUpServers(needle, true) + if err != nil { + return servers, err + } + if len(servers) == 0 { + if _, err = s.GetServers(true, 0); err != nil { + return nil, err + } + servers, err = s.Cache.LookUpServers(needle, true) + } + return servers, err +} + +// ResolveVolume attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveVolume(needle string) (ScalewayResolverResults, error) { + volumes, err := s.Cache.LookUpVolumes(needle, true) + if err != nil { + return volumes, err + } + if len(volumes) == 0 { + if _, err = s.GetVolumes(); err != nil { + return nil, err + } + volumes, err = s.Cache.LookUpVolumes(needle, true) + } + return volumes, err +} + +// ResolveSnapshot attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveSnapshot(needle string) (ScalewayResolverResults, error) { + snapshots, err := s.Cache.LookUpSnapshots(needle, true) + if err != nil { + return snapshots, err + } + if len(snapshots) == 0 { + if _, err = s.GetSnapshots(); err != nil { + return nil, err + } + snapshots, err = s.Cache.LookUpSnapshots(needle, true) + } + return snapshots, err +} + +// ResolveImage attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveImage(needle string) (ScalewayResolverResults, error) { + images, err := s.Cache.LookUpImages(needle, true) + if err != nil { + return images, err + } + if len(images) == 0 { + if _, err = s.GetImages(); err != nil { + return nil, err + } + images, err = s.Cache.LookUpImages(needle, true) + } + return images, err +} + +// ResolveBootscript attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveBootscript(needle string) (ScalewayResolverResults, error) { + bootscripts, err := s.Cache.LookUpBootscripts(needle, true) + if err != nil { + return bootscripts, err + } + if len(bootscripts) == 0 { + if _, err = s.GetBootscripts(); err != nil { + return nil, err + } + bootscripts, err = s.Cache.LookUpBootscripts(needle, true) + } + return bootscripts, err +} + +// GetImages gets the list of images from the ScalewayAPI +func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { + images, err := s.GetMarketPlaceImages("") + if err != nil { + return nil, err + } + s.Cache.ClearImages() + for i, image := range images.Images { + if image.CurrentPublicVersion != "" { + for _, version := range image.Versions { + if version.ID == image.CurrentPublicVersion { + for _, localImage := range version.LocalImages { + images.Images[i].Public = true + s.Cache.InsertImage(localImage.ID, localImage.Zone, localImage.Arch, image.Organization.ID, image.Name, image.CurrentPublicVersion) + } + } + } + } + } + values := url.Values{} + values.Set("organization", s.Organization) + resp, err := s.GetResponsePaginate(s.computeAPI, "images", values) + if err != nil { + return nil, err + } + defer 
resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var OrgaImages ScalewayImages + + if err = json.Unmarshal(body, &OrgaImages); err != nil { + return nil, err + } + + for _, orgaImage := range OrgaImages.Images { + images.Images = append(images.Images, MarketImage{ + Categories: []string{"MyImages"}, + CreationDate: orgaImage.CreationDate, + CurrentPublicVersion: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + Name: orgaImage.Name, + Public: false, + MarketVersions: MarketVersions{ + Versions: []MarketVersionDefinition{ + { + CreationDate: orgaImage.CreationDate, + ID: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + MarketLocalImages: MarketLocalImages{ + LocalImages: []MarketLocalImageDefinition{ + { + Arch: orgaImage.Arch, + ID: orgaImage.Identifier, + // TODO: fecth images from ams1 and par1 + Zone: s.Region, + }, + }, + }, + }, + }, + }, + }) + s.Cache.InsertImage(orgaImage.Identifier, s.Region, orgaImage.Arch, orgaImage.Organization, orgaImage.Name, "") + } + return &images.Images, nil +} + +// GetImage gets an image from the ScalewayAPI +func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "images/"+imageID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneImage ScalewayOneImage + + if err = json.Unmarshal(body, &oneImage); err != nil { + return nil, err + } + // FIXME owner, title + s.Cache.InsertImage(oneImage.Image.Identifier, s.Region, oneImage.Image.Arch, oneImage.Image.Organization, oneImage.Image.Name, "") + return &oneImage.Image, nil +} + +// DeleteImage deletes a image +func (s *ScalewayAPI) DeleteImage(imageID string) error { + defer s.Cache.RemoveImage(imageID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("images/%s", imageID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// DeleteSnapshot deletes a snapshot +func (s *ScalewayAPI) DeleteSnapshot(snapshotID string) error { + defer s.Cache.RemoveSnapshot(snapshotID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("snapshots/%s", snapshotID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// DeleteVolume deletes a volume +func (s *ScalewayAPI) DeleteVolume(volumeID string) error { + defer s.Cache.RemoveVolume(volumeID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// GetSnapshots gets the list of snapshots from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshots() (*[]ScalewaySnapshot, error) { + query := url.Values{} + s.Cache.ClearSnapshots() + + resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var snapshots ScalewaySnapshots + + if err = json.Unmarshal(body, &snapshots); err != nil { + return nil, err + } 
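+	// Each snapshot returned by the API is mirrored into the local cache so
+	// that later needle lookups (ResolveSnapshot) can match it without
+	// another API round-trip.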
+ for _, snapshot := range snapshots.Snapshots { + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(snapshot.Identifier, "", "", snapshot.Organization, snapshot.Name) + } + return &snapshots.Snapshots, nil +} + +// GetSnapshot gets a snapshot from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshot(snapshotID string) (*ScalewaySnapshot, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots/"+snapshotID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneSnapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &oneSnapshot); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(oneSnapshot.Snapshot.Identifier, "", "", oneSnapshot.Snapshot.Organization, oneSnapshot.Snapshot.Name) + return &oneSnapshot.Snapshot, nil +} + +// GetVolumes gets the list of volumes from the ScalewayAPI +func (s *ScalewayAPI) GetVolumes() (*[]ScalewayVolume, error) { + query := url.Values{} + s.Cache.ClearVolumes() + + resp, err := s.GetResponsePaginate(s.computeAPI, "volumes", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + + var volumes ScalewayVolumes + + if err = json.Unmarshal(body, &volumes); err != nil { + return nil, err + } + for _, volume := range volumes.Volumes { + // FIXME region, arch, owner, title + s.Cache.InsertVolume(volume.Identifier, "", "", volume.Organization, volume.Name) + } + return &volumes.Volumes, nil +} + +// GetVolume gets a volume from the ScalewayAPI +func (s *ScalewayAPI) GetVolume(volumeID string) (*ScalewayVolume, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "volumes/"+volumeID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneVolume ScalewayOneVolume + + if err = json.Unmarshal(body, &oneVolume); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertVolume(oneVolume.Volume.Identifier, "", "", oneVolume.Volume.Organization, oneVolume.Volume.Name) + return &oneVolume.Volume, nil +} + +// GetBootscripts gets the list of bootscripts from the ScalewayAPI +func (s *ScalewayAPI) GetBootscripts() (*[]ScalewayBootscript, error) { + query := url.Values{} + + s.Cache.ClearBootscripts() + resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var bootscripts ScalewayBootscripts + + if err = json.Unmarshal(body, &bootscripts); err != nil { + return nil, err + } + for _, bootscript := range bootscripts.Bootscripts { + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(bootscript.Identifier, "", bootscript.Arch, bootscript.Organization, bootscript.Title) + } + return &bootscripts.Bootscripts, nil +} + +// GetBootscript gets a bootscript from the ScalewayAPI +func (s *ScalewayAPI) GetBootscript(bootscriptID string) (*ScalewayBootscript, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts/"+bootscriptID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if 
err != nil { + return nil, err + } + var oneBootscript ScalewayOneBootscript + + if err = json.Unmarshal(body, &oneBootscript); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(oneBootscript.Bootscript.Identifier, "", oneBootscript.Bootscript.Arch, oneBootscript.Bootscript.Organization, oneBootscript.Bootscript.Title) + return &oneBootscript.Bootscript, nil +} + +// GetUserdatas gets list of userdata for a server +func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUserdatas, error) { + var uri, endpoint string + + endpoint = s.computeAPI + if metadata { + uri = "/user_data" + endpoint = MetadataAPI + } else { + uri = fmt.Sprintf("servers/%s/user_data", serverID) + } + + resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var userdatas ScalewayUserdatas + + if err = json.Unmarshal(body, &userdatas); err != nil { + return nil, err + } + return &userdatas, nil +} + +func (s *ScalewayUserdata) String() string { + return string(*s) +} + +// GetUserdata gets a specific userdata for a server +func (s *ScalewayAPI) GetUserdata(serverID, key string, metadata bool) (*ScalewayUserdata, error) { + var uri, endpoint string + + endpoint = s.computeAPI + if metadata { + uri = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + uri = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + var err error + resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("no such user_data %q (%d)", key, resp.StatusCode) + } + var data ScalewayUserdata + data, err = ioutil.ReadAll(resp.Body) + return &data, err +} + +// PatchUserdata sets a user data +func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata bool) error { + var resource, endpoint string + + endpoint = s.computeAPI + if metadata { + resource = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + resource = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + uri := fmt.Sprintf("%s/%s", strings.TrimRight(endpoint, "/"), resource) + payload := new(bytes.Buffer) + payload.Write(value) + + req, err := http.NewRequest("PATCH", uri, payload) + if err != nil { + return err + } + + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "text/plain") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + resp, err := s.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNoContent { + return nil + } + + return fmt.Errorf("cannot set user_data (%d)", resp.StatusCode) +} + +// DeleteUserdata deletes a server user_data +func (s *ScalewayAPI) DeleteUserdata(serverID, key string, metadata bool) error { + var url, endpoint string + + endpoint = s.computeAPI + if metadata { + url = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + url = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + resp, err := s.DeleteResponse(endpoint, url) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetTasks get the list of tasks from the ScalewayAPI +func (s *ScalewayAPI) GetTasks() 
(*[]ScalewayTask, error) { + query := url.Values{} + resp, err := s.GetResponsePaginate(s.computeAPI, "tasks", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var tasks ScalewayTasks + + if err = json.Unmarshal(body, &tasks); err != nil { + return nil, err + } + return &tasks.Tasks, nil +} + +// CheckCredentials performs a dummy check to ensure we can contact the API +func (s *ScalewayAPI) CheckCredentials() error { + query := url.Values{} + + resp, err := s.GetResponsePaginate(AccountAPI, "tokens", query) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return err + } + found := false + var tokens ScalewayGetTokens + + if err = json.Unmarshal(body, &tokens); err != nil { + return err + } + for _, token := range tokens.Tokens { + if token.ID == s.Token { + found = true + break + } + } + if !found { + return fmt.Errorf("Invalid token %v", s.Token) + } + return nil +} + +// GetUserID returns the userID +func (s *ScalewayAPI) GetUserID() (string, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", s.Token), url.Values{}) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return "", err + } + var token ScalewayTokensDefinition + + if err = json.Unmarshal(body, &token); err != nil { + return "", err + } + return token.Token.UserID, nil +} + +// GetOrganization returns Organization +func (s *ScalewayAPI) GetOrganization() (*ScalewayOrganizationsDefinition, error) { + resp, err := s.GetResponsePaginate(AccountAPI, "organizations", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data ScalewayOrganizationsDefinition + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data, nil +} + +// GetUser returns the user +func (s *ScalewayAPI) GetUser() (*ScalewayUserDefinition, error) { + userID, err := s.GetUserID() + if err != nil { + return nil, err + } + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("users/%s", userID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var user ScalewayUsersDefinition + + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user.User, nil +} + +// GetPermissions returns the permissions +func (s *ScalewayAPI) GetPermissions() (*ScalewayPermissionDefinition, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var permissions ScalewayPermissionDefinition + + if err = json.Unmarshal(body, &permissions); err != nil { + return nil, err + } + return &permissions, nil +} + +// GetDashboard returns the dashboard +func (s *ScalewayAPI) GetDashboard() (*ScalewayDashboard, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "dashboard", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := 
s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var dashboard ScalewayDashboardResp + + if err = json.Unmarshal(body, &dashboard); err != nil { + return nil, err + } + return &dashboard.Dashboard, nil +} + +// GetServerID returns exactly one server matching +func (s *ScalewayAPI) GetServerID(needle string) (string, error) { + // Parses optional type prefix, i.e: "server:name" -> "name" + _, needle = parseNeedle(needle) + + servers, err := s.ResolveServer(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve server %s: %s", needle, err) + } + if len(servers) == 1 { + return servers[0].Identifier, nil + } + if len(servers) == 0 { + return "", fmt.Errorf("No such server: %s", needle) + } + return "", showResolverResults(needle, servers) +} + +func showResolverResults(needle string, results ScalewayResolverResults) error { + w := tabwriter.NewWriter(os.Stderr, 20, 1, 3, ' ', 0) + defer w.Flush() + sort.Sort(results) + fmt.Fprintf(w, " IMAGEID\tFROM\tNAME\tZONE\tARCH\n") + for _, result := range results { + if result.Arch == "" { + result.Arch = "n/a" + } + fmt.Fprintf(w, "- %s\t%s\t%s\t%s\t%s\n", result.TruncIdentifier(), result.CodeName(), result.Name, result.Region, result.Arch) + } + return fmt.Errorf("Too many candidates for %s (%d)", needle, len(results)) +} + +// GetVolumeID returns exactly one volume matching +func (s *ScalewayAPI) GetVolumeID(needle string) (string, error) { + // Parses optional type prefix, i.e: "volume:name" -> "name" + _, needle = parseNeedle(needle) + + volumes, err := s.ResolveVolume(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve volume %s: %s", needle, err) + } + if len(volumes) == 1 { + return volumes[0].Identifier, nil + } + if len(volumes) == 0 { + return "", fmt.Errorf("No such volume: %s", needle) + } + return "", showResolverResults(needle, volumes) +} + +// GetSnapshotID returns exactly one snapshot matching +func (s *ScalewayAPI) GetSnapshotID(needle string) (string, error) { + // Parses optional type prefix, i.e: "snapshot:name" -> "name" + _, needle = parseNeedle(needle) + + snapshots, err := s.ResolveSnapshot(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve snapshot %s: %s", needle, err) + } + if len(snapshots) == 1 { + return snapshots[0].Identifier, nil + } + if len(snapshots) == 0 { + return "", fmt.Errorf("No such snapshot: %s", needle) + } + return "", showResolverResults(needle, snapshots) +} + +// FilterImagesByArch removes entry that doesn't match with architecture +func FilterImagesByArch(res ScalewayResolverResults, arch string) (ret ScalewayResolverResults) { + if arch == "*" { + return res + } + for _, result := range res { + if result.Arch == arch { + ret = append(ret, result) + } + } + return +} + +// FilterImagesByRegion removes entry that doesn't match with region +func FilterImagesByRegion(res ScalewayResolverResults, region string) (ret ScalewayResolverResults) { + if region == "*" { + return res + } + for _, result := range res { + if result.Region == region { + ret = append(ret, result) + } + } + return +} + +// GetImageID returns exactly one image matching +func (s *ScalewayAPI) GetImageID(needle, arch string) (*ScalewayImageIdentifier, error) { + // Parses optional type prefix, i.e: "image:name" -> "name" + _, needle = parseNeedle(needle) + + images, err := s.ResolveImage(needle) + if err != nil { + return nil, fmt.Errorf("Unable to resolve image %s: %s", needle, err) + } + images = FilterImagesByArch(images, arch) + images = 
FilterImagesByRegion(images, s.Region) + if len(images) == 1 { + return &ScalewayImageIdentifier{ + Identifier: images[0].Identifier, + Arch: images[0].Arch, + // FIXME region, owner hardcoded + Region: images[0].Region, + Owner: "", + }, nil + } + if len(images) == 0 { + return nil, fmt.Errorf("No such image (zone %s, arch %s) : %s", s.Region, arch, needle) + } + return nil, showResolverResults(needle, images) +} + +// GetSecurityGroups returns a ScalewaySecurityGroups +func (s *ScalewayAPI) GetSecurityGroups() (*ScalewayGetSecurityGroups, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "security_groups", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroups + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// GetSecurityGroupRules returns a ScalewaySecurityGroupRules +func (s *ScalewayAPI) GetSecurityGroupRules(groupID string) (*ScalewayGetSecurityGroupRules, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", groupID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroupRules ScalewayGetSecurityGroupRules + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroupRule returns a ScalewaySecurityGroupRule +func (s *ScalewayAPI) GetASecurityGroupRule(groupID string, rulesID string) (*ScalewayGetSecurityGroupRule, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroupRules ScalewayGetSecurityGroupRule + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroup returns a ScalewaySecurityGroup +func (s *ScalewayAPI) GetASecurityGroup(groupsID string) (*ScalewayGetSecurityGroup, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s", groupsID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroup + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// PostSecurityGroup posts a group on a server +func (s *ScalewayAPI) PostSecurityGroup(group ScalewayNewSecurityGroup) error { + resp, err := s.PostResponse(s.computeAPI, "security_groups", group) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusCreated}, resp) + return err +} + +// PostSecurityGroupRule posts a rule on a server +func (s *ScalewayAPI) PostSecurityGroupRule(SecurityGroupID string, rules ScalewayNewSecurityGroupRule) error { + resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", SecurityGroupID), rules) + if err != nil { + return err + } + defer resp.Body.Close() + + _, 
err = s.handleHTTPError([]int{http.StatusCreated}, resp) + return err +} + +// DeleteSecurityGroup deletes a SecurityGroup +func (s *ScalewayAPI) DeleteSecurityGroup(securityGroupID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// PutSecurityGroup updates a SecurityGroup +func (s *ScalewayAPI) PutSecurityGroup(group ScalewayUpdateSecurityGroup, securityGroupID string) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutSecurityGroupRule updates a SecurityGroupRule +func (s *ScalewayAPI) PutSecurityGroupRule(rules ScalewayNewSecurityGroupRule, securityGroupID, RuleID string) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", securityGroupID, RuleID), rules) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteSecurityGroupRule deletes a SecurityGroupRule +func (s *ScalewayAPI) DeleteSecurityGroupRule(SecurityGroupID, RuleID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", SecurityGroupID, RuleID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetContainers returns a ScalewayGetContainers +func (s *ScalewayAPI) GetContainers() (*ScalewayGetContainers, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "containers", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var containers ScalewayGetContainers + + if err = json.Unmarshal(body, &containers); err != nil { + return nil, err + } + return &containers, nil +} + +// GetContainerDatas returns a ScalewayGetContainerDatas +func (s *ScalewayAPI) GetContainerDatas(container string) (*ScalewayGetContainerDatas, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("containers/%s", container), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var datas ScalewayGetContainerDatas + + if err = json.Unmarshal(body, &datas); err != nil { + return nil, err + } + return &datas, nil +} + +// GetIPS returns a ScalewayGetIPS +func (s *ScalewayAPI) GetIPS() (*ScalewayGetIPS, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "ips", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ips ScalewayGetIPS + + if err = json.Unmarshal(body, &ips); err != nil { + return nil, err + } + return &ips, nil +} + +// NewIP returns a new IP +func (s *ScalewayAPI) NewIP() (*ScalewayGetIP, error) { + var orga struct { + Organization string `json:"organization"` + } + orga.Organization = s.Organization + resp, err := s.PostResponse(s.computeAPI, "ips", orga) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := 
s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// AttachIP attachs an IP to a server +func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { + var update struct { + Address string `json:"address"` + ID string `json:"id"` + Reverse *string `json:"reverse"` + Organization string `json:"organization"` + Server string `json:"server"` + } + + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + update.Address = ip.IP.Address + update.ID = ip.IP.ID + update.Organization = ip.IP.Organization + update.Server = serverID + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), update) + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DetachIP detaches an IP from a server +func (s *ScalewayAPI) DetachIP(ipID string) error { + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + ip.IP.Server = nil + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), ip.IP) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteIP deletes an IP +func (s *ScalewayAPI) DeleteIP(ipID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetIP returns a ScalewayGetIP +func (s *ScalewayAPI) GetIP(ipID string) (*ScalewayGetIP, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("ips/%s", ipID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// GetQuotas returns a ScalewayGetQuotas +func (s *ScalewayAPI) GetQuotas() (*ScalewayGetQuotas, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("organizations/%s/quotas", s.Organization), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var quotas ScalewayGetQuotas + + if err = json.Unmarshal(body, "as); err != nil { + return nil, err + } + return "as, nil +} + +// GetBootscriptID returns exactly one bootscript matching +func (s *ScalewayAPI) GetBootscriptID(needle, arch string) (string, error) { + // Parses optional type prefix, i.e: "bootscript:name" -> "name" + _, needle = parseNeedle(needle) + + bootscripts, err := s.ResolveBootscript(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve bootscript %s: %s", needle, err) + } + bootscripts.FilterByArch(arch) + if len(bootscripts) == 1 { + return bootscripts[0].Identifier, nil + } + if len(bootscripts) == 0 { + return "", fmt.Errorf("No such bootscript: %s", needle) + } + return "", showResolverResults(needle, bootscripts) +} + +func rootNetDial(network, addr string) (net.Conn, error) { + dialer := net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 10 * time.Second, + } + + // bruteforce privileged ports + var localAddr net.Addr + var err error + for port := 1; port <= 1024; port++ { + localAddr, err = net.ResolveTCPAddr("tcp", 
fmt.Sprintf(":%d", port)) + + // this should never happen + if err != nil { + return nil, err + } + + dialer.LocalAddr = localAddr + + conn, err := dialer.Dial(network, addr) + + // if err is nil, dialer.Dial succeed, so let's go + // else, err != nil, but we don't care + if err == nil { + return conn, nil + } + } + // if here, all privileged ports were tried without success + return nil, fmt.Errorf("bind: permission denied, are you root ?") +} + +// SetPassword register the password +func (s *ScalewayAPI) SetPassword(password string) { + s.password = password +} + +// GetMarketPlaceImages returns images from marketplace +func (s *ScalewayAPI) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%s", uuidImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketImages + + if uuidImage != "" { + ret.Images = make([]MarketImage, 1) + + var img MarketImage + + if err = json.Unmarshal(body, &img); err != nil { + return nil, err + } + ret.Images[0] = img + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageVersions returns image version +func (s *ScalewayAPI) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s", uuidImage, uuidVersion), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketVersions + + if uuidImage != "" { + var version MarketVersion + ret.Versions = make([]MarketVersionDefinition, 1) + + if err = json.Unmarshal(body, &version); err != nil { + return nil, err + } + ret.Versions[0] = version.Version + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageCurrentVersion return the image current version +func (s *ScalewayAPI) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/current", uuidImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketVersion + + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + return &ret, nil +} + +// GetMarketPlaceLocalImages returns images from local region +func (s *ScalewayAPI) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%s", uuidImage, uuidVersion, uuidLocalImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketLocalImages + if uuidLocalImage != "" { + var localImage MarketLocalImage + ret.LocalImages = make([]MarketLocalImageDefinition, 1) + + if err = json.Unmarshal(body, &localImage); err != nil { + return nil, err + } + ret.LocalImages[0] = localImage.LocalImages + } else { + if err = json.Unmarshal(body, 
&ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// PostMarketPlaceImage adds new image +func (s *ScalewayAPI) PostMarketPlaceImage(images MarketImage) error { + resp, err := s.PostResponse(MarketplaceAPI, "images/", images) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PostMarketPlaceImageVersion adds new image version +func (s *ScalewayAPI) PostMarketPlaceImageVersion(uuidImage string, version MarketVersion) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions", uuidImage), version) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PostMarketPlaceLocalImage adds new local image +func (s *ScalewayAPI) PostMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PutMarketPlaceImage updates image +func (s *ScalewayAPI) PutMarketPlaceImage(uudiImage string, images MarketImage) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudiImage), images) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutMarketPlaceImageVersion updates image version +func (s *ScalewayAPI) PutMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion), version) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutMarketPlaceLocalImage updates local image +func (s *ScalewayAPI) PutMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteMarketPlaceImage deletes image +func (s *ScalewayAPI) DeleteMarketPlaceImage(uudImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudImage)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// DeleteMarketPlaceImageVersion delete image version +func (s *ScalewayAPI) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// DeleteMarketPlaceLocalImage deletes local image +func (s *ScalewayAPI) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage)) + if err != nil { + return err 
+ } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// ResolveTTYUrl return an URL to get a tty +func (s *ScalewayAPI) ResolveTTYUrl() string { + switch s.Region { + case "par1", "": + return "https://tty-par1.scaleway.com/v2/" + case "ams1": + return "https://tty-ams1.scaleway.com" + } + return "" +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go new file mode 100644 index 000000000..1d9771a0e --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go @@ -0,0 +1,814 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +package api + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/moul/anonuuid" + "github.com/renstrom/fuzzysearch/fuzzy" +) + +const ( + // CacheRegion permits to access at the region field + CacheRegion = iota + // CacheArch permits to access at the arch field + CacheArch + // CacheOwner permits to access at the owner field + CacheOwner + // CacheTitle permits to access at the title field + CacheTitle + // CacheMarketPlaceUUID is used to determine the UUID of local images + CacheMarketPlaceUUID + // CacheMaxfield is used to determine the size of array + CacheMaxfield +) + +// ScalewayCache is used not to query the API to resolve full identifiers +type ScalewayCache struct { + // Images contains names of Scaleway images indexed by identifier + Images map[string][CacheMaxfield]string `json:"images"` + + // Snapshots contains names of Scaleway snapshots indexed by identifier + Snapshots map[string][CacheMaxfield]string `json:"snapshots"` + + // Volumes contains names of Scaleway volumes indexed by identifier + Volumes map[string][CacheMaxfield]string `json:"volumes"` + + // Bootscripts contains names of Scaleway bootscripts indexed by identifier + Bootscripts map[string][CacheMaxfield]string `json:"bootscripts"` + + // Servers contains names of Scaleway servers indexed by identifier + Servers map[string][CacheMaxfield]string `json:"servers"` + + // Path is the path to the cache file + Path string `json:"-"` + + // Modified tells if the cache needs to be overwritten or not + Modified bool `json:"-"` + + // Lock allows ScalewayCache to be used concurrently + Lock sync.Mutex `json:"-"` + + hookSave func() +} + +const ( + // IdentifierUnknown is used when we don't know explicitly the type key of the object (used for nil comparison) + IdentifierUnknown = 1 << iota + // IdentifierServer is the type key of cached server objects + IdentifierServer + // IdentifierImage is the type key of cached image objects + IdentifierImage + // IdentifierSnapshot is the type key of cached snapshot objects + IdentifierSnapshot + // IdentifierBootscript is the type key of cached bootscript objects + IdentifierBootscript + // IdentifierVolume is the type key of cached volume objects + IdentifierVolume +) + +// ScalewayResolverResult is a structure containing human-readable information +// about resolver results. This structure is used to display the user choices. 
+type ScalewayResolverResult struct { + Identifier string + Type int + Name string + Arch string + Needle string + RankMatch int + Region string +} + +// ScalewayResolverResults is a list of `ScalewayResolverResult` +type ScalewayResolverResults []ScalewayResolverResult + +// NewScalewayResolverResult returns a new ScalewayResolverResult +func NewScalewayResolverResult(Identifier, Name, Arch, Region string, Type int) (ScalewayResolverResult, error) { + if err := anonuuid.IsUUID(Identifier); err != nil { + return ScalewayResolverResult{}, err + } + return ScalewayResolverResult{ + Identifier: Identifier, + Type: Type, + Name: Name, + Arch: Arch, + Region: Region, + }, nil +} + +func (s ScalewayResolverResults) Len() int { + return len(s) +} + +func (s ScalewayResolverResults) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ScalewayResolverResults) Less(i, j int) bool { + return s[i].RankMatch < s[j].RankMatch +} + +// TruncIdentifier returns first 8 characters of an Identifier (UUID) +func (s *ScalewayResolverResult) TruncIdentifier() string { + return s.Identifier[:8] +} + +func identifierTypeName(kind int) string { + switch kind { + case IdentifierServer: + return "Server" + case IdentifierImage: + return "Image" + case IdentifierSnapshot: + return "Snapshot" + case IdentifierVolume: + return "Volume" + case IdentifierBootscript: + return "Bootscript" + } + return "" +} + +// CodeName returns a full resource name with typed prefix +func (s *ScalewayResolverResult) CodeName() string { + name := strings.ToLower(s.Name) + name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-") + name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-") + name = strings.Trim(name, "-") + + return fmt.Sprintf("%s:%s", strings.ToLower(identifierTypeName(s.Type)), name) +} + +// FilterByArch deletes the elements which not match with arch +func (s *ScalewayResolverResults) FilterByArch(arch string) { +REDO: + for i := range *s { + if (*s)[i].Arch != arch { + (*s)[i] = (*s)[len(*s)-1] + *s = (*s)[:len(*s)-1] + goto REDO + } + } +} + +// NewScalewayCache loads a per-user cache +func NewScalewayCache(hookSave func()) (*ScalewayCache, error) { + var cache ScalewayCache + + cache.hookSave = hookSave + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + homeDir = "/tmp" + } + cachePath := filepath.Join(homeDir, ".scw-cache.db") + cache.Path = cachePath + _, err := os.Stat(cachePath) + if os.IsNotExist(err) { + cache.Clear() + return &cache, nil + } else if err != nil { + return nil, err + } + file, err := ioutil.ReadFile(cachePath) + if err != nil { + return nil, err + } + err = json.Unmarshal(file, &cache) + if err != nil { + // fix compatibility with older version + if err = os.Remove(cachePath); err != nil { + return nil, err + } + cache.Clear() + return &cache, nil + } + if cache.Images == nil { + cache.Images = make(map[string][CacheMaxfield]string) + } + if cache.Snapshots == nil { + cache.Snapshots = make(map[string][CacheMaxfield]string) + } + if cache.Volumes == nil { + cache.Volumes = make(map[string][CacheMaxfield]string) + } + if cache.Servers == nil { + cache.Servers = make(map[string][CacheMaxfield]string) + } + if cache.Bootscripts == nil { + cache.Bootscripts = make(map[string][CacheMaxfield]string) + } + return &cache, nil +} + +// Clear removes all information from the cache +func (c *ScalewayCache) Clear() { + c.Images = make(map[string][CacheMaxfield]string) + c.Snapshots = 
make(map[string][CacheMaxfield]string) + c.Volumes = make(map[string][CacheMaxfield]string) + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// Flush flushes the cache database +func (c *ScalewayCache) Flush() error { + return os.Remove(c.Path) +} + +// Save atomically overwrites the current cache database +func (c *ScalewayCache) Save() error { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.hookSave() + if c.Modified { + file, err := ioutil.TempFile(filepath.Dir(c.Path), filepath.Base(c.Path)) + if err != nil { + return err + } + defer file.Close() + if err := json.NewEncoder(file).Encode(c); err != nil { + os.Remove(file.Name()) + return err + } + + if err := os.Rename(file.Name(), c.Path); err != nil { + os.Remove(file.Name()) + return err + } + } + return nil +} + +// ComputeRankMatch fills `ScalewayResolverResult.RankMatch` with its `fuzzy` score +func (s *ScalewayResolverResult) ComputeRankMatch(needle string) { + s.Needle = needle + s.RankMatch = fuzzy.RankMatch(needle, s.Name) +} + +// LookUpImages attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Images[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + // FIXME: if 'user/' is in needle, only watch for a user image + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Images { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } else if strings.HasPrefix(fields[CacheMarketPlaceUUID], needle) || nameRegex.MatchString(fields[CacheMarketPlaceUUID]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpSnapshots attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := 
c.Snapshots[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Snapshots { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpVolumes attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Volumes[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Volumes { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpBootscripts attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Bootscripts[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := 
regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Bootscripts { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpServers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Servers[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Servers { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// removeDuplicatesResults transforms an array into a unique array +func removeDuplicatesResults(elements ScalewayResolverResults) ScalewayResolverResults { + encountered := map[string]ScalewayResolverResult{} + + // Create a map of all unique elements. + for v := range elements { + encountered[elements[v].Identifier] = elements[v] + } + + // Place all keys from the map into a slice. 
+ results := ScalewayResolverResults{} + for _, result := range encountered { + results = append(results, result) + } + return results +} + +// parseNeedle parses a user needle and try to extract a forced object type +// i.e: +// - server:blah-blah -> kind=server, needle=blah-blah +// - blah-blah -> kind="", needle=blah-blah +// - not-existing-type:blah-blah +func parseNeedle(input string) (identifierType int, needle string) { + parts := strings.Split(input, ":") + if len(parts) == 2 { + switch parts[0] { + case "server": + return IdentifierServer, parts[1] + case "image": + return IdentifierImage, parts[1] + case "snapshot": + return IdentifierSnapshot, parts[1] + case "bootscript": + return IdentifierBootscript, parts[1] + case "volume": + return IdentifierVolume, parts[1] + } + } + return IdentifierUnknown, input +} + +// LookUpIdentifiers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpIdentifiers(needle string) (ScalewayResolverResults, error) { + results := ScalewayResolverResults{} + + identifierType, needle := parseNeedle(needle) + + if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { + servers, err := c.LookUpServers(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range servers { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { + images, err := c.LookUpImages(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range images { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { + snapshots, err := c.LookUpSnapshots(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range snapshots { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { + volumes, err := c.LookUpVolumes(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range volumes { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { + bootscripts, err := c.LookUpBootscripts(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range bootscripts { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + return results, nil +} + +// InsertServer registers a server in the cache +func (c 
*ScalewayCache) InsertServer(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Servers[identifier] + if !exists || fields[CacheTitle] != name { + c.Servers[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveServer removes a server from the cache +func (c *ScalewayCache) RemoveServer(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Servers, identifier) + c.Modified = true +} + +// ClearServers removes all servers from the cache +func (c *ScalewayCache) ClearServers() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertImage registers an image in the cache +func (c *ScalewayCache) InsertImage(identifier, region, arch, owner, name, marketPlaceUUID string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Images[identifier] + if !exists || fields[CacheTitle] != name { + c.Images[identifier] = [CacheMaxfield]string{region, arch, owner, name, marketPlaceUUID} + c.Modified = true + } +} + +// RemoveImage removes a server from the cache +func (c *ScalewayCache) RemoveImage(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Images, identifier) + c.Modified = true +} + +// ClearImages removes all images from the cache +func (c *ScalewayCache) ClearImages() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Images = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertSnapshot registers an snapshot in the cache +func (c *ScalewayCache) InsertSnapshot(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Snapshots[identifier] + if !exists || fields[CacheTitle] != name { + c.Snapshots[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveSnapshot removes a server from the cache +func (c *ScalewayCache) RemoveSnapshot(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Snapshots, identifier) + c.Modified = true +} + +// ClearSnapshots removes all snapshots from the cache +func (c *ScalewayCache) ClearSnapshots() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Snapshots = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertVolume registers an volume in the cache +func (c *ScalewayCache) InsertVolume(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Volumes[identifier] + if !exists || fields[CacheTitle] != name { + c.Volumes[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveVolume removes a server from the cache +func (c *ScalewayCache) RemoveVolume(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Volumes, identifier) + c.Modified = true +} + +// ClearVolumes removes all volumes from the cache +func (c *ScalewayCache) ClearVolumes() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Volumes = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertBootscript registers an bootscript in the cache +func (c *ScalewayCache) InsertBootscript(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Bootscripts[identifier] + if !exists || fields[CacheTitle] != name { + c.Bootscripts[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// 
RemoveBootscript removes a bootscript from the cache +func (c *ScalewayCache) RemoveBootscript(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Bootscripts, identifier) + c.Modified = true +} + +// ClearBootscripts removes all bootscripts from the cache +func (c *ScalewayCache) ClearBootscripts() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// GetNbServers returns the number of servers in the cache +func (c *ScalewayCache) GetNbServers() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Servers) +} + +// GetNbImages returns the number of images in the cache +func (c *ScalewayCache) GetNbImages() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Images) +} + +// GetNbSnapshots returns the number of snapshots in the cache +func (c *ScalewayCache) GetNbSnapshots() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Snapshots) +} + +// GetNbVolumes returns the number of volumes in the cache +func (c *ScalewayCache) GetNbVolumes() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Volumes) +} + +// GetNbBootscripts returns the number of bootscripts in the cache +func (c *ScalewayCache) GetNbBootscripts() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Bootscripts) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go new file mode 100644 index 000000000..3a59e465d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go @@ -0,0 +1,685 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +package api + +import ( + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/dustin/go-humanize" + "github.com/moul/anonuuid" + "github.com/scaleway/scaleway-cli/pkg/utils" +) + +// ScalewayResolvedIdentifier represents a list of matching identifier for a specifier pattern +type ScalewayResolvedIdentifier struct { + // Identifiers holds matching identifiers + Identifiers ScalewayResolverResults + + // Needle is the criteria used to lookup identifiers + Needle string +} + +// ScalewayImageInterface is an interface to multiple Scaleway items +type ScalewayImageInterface struct { + CreationDate time.Time + Identifier string + Name string + Tag string + VirtualSize uint64 + Public bool + Type string + Organization string + Archs []string + Region []string +} + +// ResolveGateway tries to resolve a server public ip address, else returns the input string, i.e. 
IPv4, hostname +func ResolveGateway(api *ScalewayAPI, gateway string) (string, error) { + if gateway == "" { + return "", nil + } + + // Parses optional type prefix, i.e: "server:name" -> "name" + _, gateway = parseNeedle(gateway) + + servers, err := api.ResolveServer(gateway) + if err != nil { + return "", err + } + + if len(servers) == 0 { + return gateway, nil + } + + if len(servers) > 1 { + return "", showResolverResults(gateway, servers) + } + + // if len(servers) == 1 { + server, err := api.GetServer(servers[0].Identifier) + if err != nil { + return "", err + } + return server.PublicAddress.IP, nil +} + +// CreateVolumeFromHumanSize creates a volume on the API with a human readable size +func CreateVolumeFromHumanSize(api *ScalewayAPI, size string) (*string, error) { + bytes, err := humanize.ParseBytes(size) + if err != nil { + return nil, err + } + + var newVolume ScalewayVolumeDefinition + newVolume.Name = size + newVolume.Size = bytes + newVolume.Type = "l_ssd" + + volumeID, err := api.PostVolume(newVolume) + if err != nil { + return nil, err + } + + return &volumeID, nil +} + +// fillIdentifierCache fills the cache by fetching from the API +func fillIdentifierCache(api *ScalewayAPI, identifierType int) { + log.Debugf("Filling the cache") + var wg sync.WaitGroup + wg.Add(5) + go func() { + if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { + api.GetServers(true, 0) + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { + api.GetImages() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { + api.GetSnapshots() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { + api.GetVolumes() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { + api.GetBootscripts() + } + wg.Done() + }() + wg.Wait() +} + +// GetIdentifier returns a an identifier if the resolved needles only match one element, else, it exists the program +func GetIdentifier(api *ScalewayAPI, needle string) (*ScalewayResolverResult, error) { + idents, err := ResolveIdentifier(api, needle) + if err != nil { + return nil, err + } + + if len(idents) == 1 { + return &idents[0], nil + } + if len(idents) == 0 { + return nil, fmt.Errorf("No such identifier: %s", needle) + } + + sort.Sort(idents) + for _, identifier := range idents { + // FIXME: also print the name + fmt.Fprintf(os.Stderr, "- %s\n", identifier.Identifier) + } + return nil, fmt.Errorf("Too many candidates for %s (%d)", needle, len(idents)) +} + +// ResolveIdentifier resolves needle provided by the user +func ResolveIdentifier(api *ScalewayAPI, needle string) (ScalewayResolverResults, error) { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + return idents, err + } + if len(idents) > 0 { + return idents, nil + } + + identifierType, _ := parseNeedle(needle) + fillIdentifierCache(api, identifierType) + + return api.Cache.LookUpIdentifiers(needle) +} + +// ResolveIdentifiers resolves needles provided by the user +func ResolveIdentifiers(api *ScalewayAPI, needles []string, out chan ScalewayResolvedIdentifier) { + // first attempt, only lookup from the cache + var unresolved []string + for _, needle := range needles { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + api.Logger.Fatalf("%s", err) + } + if len(idents) == 0 { + unresolved = append(unresolved, needle) + } else { + out <- ScalewayResolvedIdentifier{ + Identifiers: idents, + 
Needle: needle, + } + } + } + // fill the cache by fetching from the API and resolve missing identifiers + if len(unresolved) > 0 { + // compute identifierType: + // if identifierType is the same for every unresolved needle, + // we use it directly, else, we choose IdentifierUnknown to + // fulfill every types of cache + identifierType, _ := parseNeedle(unresolved[0]) + for _, needle := range unresolved { + newIdentifierType, _ := parseNeedle(needle) + if identifierType != newIdentifierType { + identifierType = IdentifierUnknown + break + } + } + + // fill all the cache + fillIdentifierCache(api, identifierType) + + // lookup again in the cache + for _, needle := range unresolved { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + api.Logger.Fatalf("%s", err) + } + out <- ScalewayResolvedIdentifier{ + Identifiers: idents, + Needle: needle, + } + } + } + + close(out) +} + +// InspectIdentifierResult is returned by `InspectIdentifiers` and contains the inspected `Object` with its `Type` +type InspectIdentifierResult struct { + Type int + Object interface{} +} + +// InspectIdentifiers inspects identifiers concurrently +func InspectIdentifiers(api *ScalewayAPI, ci chan ScalewayResolvedIdentifier, cj chan InspectIdentifierResult, arch string) { + var wg sync.WaitGroup + for { + idents, ok := <-ci + if !ok { + break + } + idents.Identifiers = FilterImagesByArch(idents.Identifiers, arch) + idents.Identifiers = FilterImagesByRegion(idents.Identifiers, api.Region) + if len(idents.Identifiers) != 1 { + if len(idents.Identifiers) == 0 { + log.Errorf("Unable to resolve identifier %s", idents.Needle) + } else { + logrus.Fatal(showResolverResults(idents.Needle, idents.Identifiers)) + } + } else { + ident := idents.Identifiers[0] + wg.Add(1) + go func() { + var obj interface{} + var err error + + switch ident.Type { + case IdentifierServer: + obj, err = api.GetServer(ident.Identifier) + case IdentifierImage: + obj, err = api.GetImage(ident.Identifier) + case IdentifierSnapshot: + obj, err = api.GetSnapshot(ident.Identifier) + case IdentifierVolume: + obj, err = api.GetVolume(ident.Identifier) + case IdentifierBootscript: + obj, err = api.GetBootscript(ident.Identifier) + } + if err == nil && obj != nil { + cj <- InspectIdentifierResult{ + Type: ident.Type, + Object: obj, + } + } + wg.Done() + }() + } + } + wg.Wait() + close(cj) +} + +// ConfigCreateServer represents the options sent to CreateServer and defining a server +type ConfigCreateServer struct { + ImageName string + Name string + Bootscript string + Env string + AdditionalVolumes string + IP string + CommercialType string + DynamicIPRequired bool + EnableIPV6 bool +} + +// CreateServer creates a server using API based on typical server fields +func CreateServer(api *ScalewayAPI, c *ConfigCreateServer) (string, error) { + commercialType := os.Getenv("SCW_COMMERCIAL_TYPE") + if commercialType == "" { + commercialType = c.CommercialType + } + if len(commercialType) < 2 { + return "", errors.New("Invalid commercial type") + } + + if c.Name == "" { + c.Name = strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1) + } + + var server ScalewayServerDefinition + + server.CommercialType = commercialType + server.Volumes = make(map[string]string) + server.DynamicIPRequired = &c.DynamicIPRequired + server.EnableIPV6 = c.EnableIPV6 + if commercialType == "" { + return "", errors.New("You need to specify a commercial-type") + } + if c.IP != "" { + if anonuuid.IsUUID(c.IP) == nil { + server.PublicIP = c.IP + } else { + ips, err := 
api.GetIPS() + if err != nil { + return "", err + } + for _, ip := range ips.IPS { + if ip.Address == c.IP { + server.PublicIP = ip.ID + break + } + } + if server.PublicIP == "" { + return "", fmt.Errorf("IP address %v not found", c.IP) + } + } + } + server.Tags = []string{} + if c.Env != "" { + server.Tags = strings.Split(c.Env, " ") + } + switch c.CommercialType { + case "VC1M", "X64-4GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "50G" + log.Debugf("This server needs a least 50G") + } + case "VC1L", "X64-8GB", "X64-15GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "150G" + log.Debugf("This server needs a least 150G") + } + case "X64-30GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "100G 150G" + log.Debugf("This server needs a least 300G") + } + case "X64-60GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "50G 150G 150G" + log.Debugf("This server needs a least 400G") + } + case "X64-120GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "150G 150G 150G" + log.Debugf("This server needs a least 500G") + } + } + if c.AdditionalVolumes != "" { + volumes := strings.Split(c.AdditionalVolumes, " ") + for i := range volumes { + volumeID, err := CreateVolumeFromHumanSize(api, volumes[i]) + if err != nil { + return "", err + } + + volumeIDx := fmt.Sprintf("%d", i+1) + server.Volumes[volumeIDx] = *volumeID + } + } + arch := os.Getenv("SCW_TARGET_ARCH") + if arch == "" { + server.CommercialType = strings.ToUpper(server.CommercialType) + switch server.CommercialType[:2] { + case "C1": + arch = "arm" + case "C2", "VC", "X6": + arch = "x86_64" + default: + return "", fmt.Errorf("%s wrong commercial type", server.CommercialType) + } + } + imageIdentifier := &ScalewayImageIdentifier{ + Arch: arch, + } + server.Name = c.Name + inheritingVolume := false + _, err := humanize.ParseBytes(c.ImageName) + if err == nil { + // Create a new root volume + volumeID, errCreateVol := CreateVolumeFromHumanSize(api, c.ImageName) + if errCreateVol != nil { + return "", errCreateVol + } + server.Volumes["0"] = *volumeID + } else { + // Use an existing image + inheritingVolume = true + if anonuuid.IsUUID(c.ImageName) == nil { + server.Image = &c.ImageName + } else { + imageIdentifier, err = api.GetImageID(c.ImageName, arch) + if err != nil { + return "", err + } + if imageIdentifier.Identifier != "" { + server.Image = &imageIdentifier.Identifier + } else { + snapshotID, errGetSnapID := api.GetSnapshotID(c.ImageName) + if errGetSnapID != nil { + return "", errGetSnapID + } + snapshot, errGetSnap := api.GetSnapshot(snapshotID) + if errGetSnap != nil { + return "", errGetSnap + } + if snapshot.BaseVolume.Identifier == "" { + return "", fmt.Errorf("snapshot %v does not have base volume", snapshot.Name) + } + server.Volumes["0"] = snapshot.BaseVolume.Identifier + } + } + } + + if c.Bootscript != "" { + bootscript := "" + + if anonuuid.IsUUID(c.Bootscript) == nil { + bootscript = c.Bootscript + } else { + var errGetBootScript error + + bootscript, errGetBootScript = api.GetBootscriptID(c.Bootscript, imageIdentifier.Arch) + if errGetBootScript != nil { + return "", errGetBootScript + } + } + server.Bootscript = &bootscript + } + serverID, err := api.PostServer(server) + if err != nil { + return "", err + } + + // For inherited volumes, we prefix the name with server hostname + if inheritingVolume { + createdServer, err := api.GetServer(serverID) + if err != nil { + return "", err + } + currentVolume := createdServer.Volumes["0"] + + var volumePayload 
ScalewayVolumePutDefinition + newName := fmt.Sprintf("%s-%s", createdServer.Hostname, currentVolume.Name) + volumePayload.Name = &newName + volumePayload.CreationDate = &currentVolume.CreationDate + volumePayload.Organization = &currentVolume.Organization + volumePayload.Server.Identifier = &currentVolume.Server.Identifier + volumePayload.Server.Name = &currentVolume.Server.Name + volumePayload.Identifier = &currentVolume.Identifier + volumePayload.Size = &currentVolume.Size + volumePayload.ModificationDate = &currentVolume.ModificationDate + volumePayload.ExportURI = &currentVolume.ExportURI + volumePayload.VolumeType = &currentVolume.VolumeType + + err = api.PutVolume(currentVolume.Identifier, volumePayload) + if err != nil { + return "", err + } + } + + return serverID, nil +} + +// WaitForServerState asks API in a loop until a server matches a wanted state +func WaitForServerState(api *ScalewayAPI, serverID string, targetState string) (*ScalewayServer, error) { + var server *ScalewayServer + var err error + + var currentState string + + for { + server, err = api.GetServer(serverID) + if err != nil { + return nil, err + } + if currentState != server.State { + log.Infof("Server changed state to '%s'", server.State) + currentState = server.State + } + if server.State == targetState { + break + } + time.Sleep(1 * time.Second) + } + + return server, nil +} + +// WaitForServerReady wait for a server state to be running, then wait for the SSH port to be available +func WaitForServerReady(api *ScalewayAPI, serverID, gateway string) (*ScalewayServer, error) { + promise := make(chan bool) + var server *ScalewayServer + var err error + var currentState string + + go func() { + defer close(promise) + + for { + server, err = api.GetServer(serverID) + if err != nil { + promise <- false + return + } + if currentState != server.State { + log.Infof("Server changed state to '%s'", server.State) + currentState = server.State + } + if server.State == "running" { + break + } + if server.State == "stopped" { + err = fmt.Errorf("The server has been stopped") + promise <- false + return + } + time.Sleep(1 * time.Second) + } + + if gateway == "" { + dest := fmt.Sprintf("%s:22", server.PublicAddress.IP) + log.Debugf("Waiting for server SSH port %s", dest) + err = utils.WaitForTCPPortOpen(dest) + if err != nil { + promise <- false + return + } + } else { + dest := fmt.Sprintf("%s:22", gateway) + log.Debugf("Waiting for server SSH port %s", dest) + err = utils.WaitForTCPPortOpen(dest) + if err != nil { + promise <- false + return + } + log.Debugf("Check for SSH port through the gateway: %s", server.PrivateIP) + timeout := time.Tick(120 * time.Second) + for { + select { + case <-timeout: + err = fmt.Errorf("Timeout: unable to ping %s", server.PrivateIP) + goto OUT + default: + if utils.SSHExec("", server.PrivateIP, "root", 22, []string{ + "nc", + "-z", + "-w", + "1", + server.PrivateIP, + "22", + }, false, gateway, false) == nil { + goto OUT + } + time.Sleep(2 * time.Second) + } + } + OUT: + if err != nil { + logrus.Info(err) + err = nil + } + } + promise <- true + }() + + loop := 0 + for { + select { + case done := <-promise: + utils.LogQuiet("\r \r") + if !done { + return nil, err + } + return server, nil + case <-time.After(time.Millisecond * 100): + utils.LogQuiet(fmt.Sprintf("\r%c\r", "-\\|/"[loop%4])) + loop = loop + 1 + if loop == 5 { + loop = 0 + } + } + } +} + +// WaitForServerStopped wait for a server state to be stopped +func WaitForServerStopped(api *ScalewayAPI, serverID string) (*ScalewayServer, error) { + server, err := WaitForServerState(api, 
serverID, "stopped") + if err != nil { + return nil, err + } + return server, nil +} + +// ByCreationDate sorts images by CreationDate field +type ByCreationDate []ScalewayImageInterface + +func (a ByCreationDate) Len() int { return len(a) } +func (a ByCreationDate) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByCreationDate) Less(i, j int) bool { return a[j].CreationDate.Before(a[i].CreationDate) } + +// StartServer start a server based on its needle, can optionaly block while server is booting +func StartServer(api *ScalewayAPI, needle string, wait bool) error { + server, err := api.GetServerID(needle) + if err != nil { + return err + } + + if err = api.PostServerAction(server, "poweron"); err != nil { + return err + } + + if wait { + _, err = WaitForServerReady(api, server, "") + if err != nil { + return fmt.Errorf("failed to wait for server %s to be ready, %v", needle, err) + } + } + return nil +} + +// StartServerOnce wraps StartServer for golang channel +func StartServerOnce(api *ScalewayAPI, needle string, wait bool, successChan chan string, errChan chan error) { + err := StartServer(api, needle, wait) + + if err != nil { + errChan <- err + return + } + successChan <- needle +} + +// DeleteServerForce tries to delete a server using multiple ways +func (a *ScalewayAPI) DeleteServerForce(serverID string) error { + // FIXME: also delete attached volumes and ip address + // FIXME: call delete and stop -t in parallel to speed up process + err := a.DeleteServer(serverID) + if err == nil { + logrus.Infof("Server '%s' successfully deleted", serverID) + return nil + } + + err = a.PostServerAction(serverID, "terminate") + if err == nil { + logrus.Infof("Server '%s' successfully terminated", serverID) + return nil + } + + // FIXME: retry in a loop until timeout or Control+C + logrus.Errorf("Failed to delete server %s", serverID) + logrus.Errorf("Try to run 'scw rm -f %s' later", serverID) + return err +} + +// GetSSHFingerprintFromServer returns an array which containts ssh-host-fingerprints +func (a *ScalewayAPI) GetSSHFingerprintFromServer(serverID string) []string { + ret := []string{} + + if value, err := a.GetUserdata(serverID, "ssh-host-fingerprints", false); err == nil { + PublicKeys := strings.Split(string(*value), "\n") + for i := range PublicKeys { + if fingerprint, err := utils.SSHGetFingerprint([]byte(PublicKeys[i])); err == nil { + ret = append(ret, fingerprint) + } + } + } + return ret +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go new file mode 100644 index 000000000..58ad93716 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go @@ -0,0 +1,77 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +package api + +import ( + "fmt" + "log" + "net/http" + "os" +) + +// Logger handles logging concerns for the Scaleway API SDK +type Logger interface { + LogHTTP(*http.Request) + Fatalf(format string, v ...interface{}) + Debugf(format string, v ...interface{}) + Infof(format string, v ...interface{}) + Warnf(format string, v ...interface{}) +} + +// NewDefaultLogger returns a logger which is configured for stdout +func NewDefaultLogger() Logger { + return &defaultLogger{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +type defaultLogger struct { + *log.Logger +} + +func (l *defaultLogger) LogHTTP(r *http.Request) { + l.Printf("%s %s\n", r.Method, r.URL.RawPath) +} + +func (l *defaultLogger) Fatalf(format string, v ...interface{}) { + l.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) + os.Exit(1) +} + +func (l *defaultLogger) Debugf(format string, v ...interface{}) { + l.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) +} + +func (l *defaultLogger) Infof(format string, v ...interface{}) { + l.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) +} + +func (l *defaultLogger) Warnf(format string, v ...interface{}) { + l.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) +} + +type disableLogger struct { +} + +// NewDisableLogger returns a logger which is configured to do nothing +func NewDisableLogger() Logger { + return &disableLogger{} +} + +func (d *disableLogger) LogHTTP(r *http.Request) { +} + +func (d *disableLogger) Fatalf(format string, v ...interface{}) { + panic(fmt.Sprintf(format, v)) +} + +func (d *disableLogger) Debugf(format string, v ...interface{}) { +} + +func (d *disableLogger) Infof(format string, v ...interface{}) { +} + +func (d *disableLogger) Warnf(format string, v ...interface{}) { +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go b/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go new file mode 100644 index 000000000..676b2f976 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go @@ -0,0 +1,124 @@ +package sshcommand + +import ( + "fmt" + "runtime" + "strings" +) + +// Command contains settings to build a ssh command +type Command struct { + Host string + User string + Port int + SSHOptions []string + Gateway *Command + Command []string + Debug bool + NoEscapeCommand bool + SkipHostKeyChecking bool + Quiet bool + AllocateTTY bool + EnableSSHKeyForwarding bool + + isGateway bool +} + +// New returns a minimal Command +func New(host string) *Command { + return &Command{ + Host: host, + } +} + +func (c *Command) applyDefaults() { + if strings.Contains(c.Host, "@") { + parts := strings.Split(c.Host, "@") + c.User = parts[0] + c.Host = parts[1] + } + + if c.Port == 0 { + c.Port = 22 + } + + if c.isGateway { + c.SSHOptions = []string{"-W", "%h:%p"} + } +} + +// Slice returns an execve compatible slice of arguments +func (c *Command) Slice() []string { + c.applyDefaults() + + slice := []string{} + + slice = append(slice, "ssh") + + if c.EnableSSHKeyForwarding { + slice = append(slice, "-A") + } + + if c.Quiet { + slice = append(slice, "-q") + } + + if c.SkipHostKeyChecking { + slice = append(slice, "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no") + } + + if len(c.SSHOptions) > 0 { + slice = append(slice, c.SSHOptions...) 
+ } + + if c.Gateway != nil { + c.Gateway.isGateway = true + slice = append(slice, "-o", "ProxyCommand="+c.Gateway.String()) + } + + if c.User != "" { + slice = append(slice, "-l", c.User) + } + + slice = append(slice, c.Host) + + if c.AllocateTTY { + slice = append(slice, "-t", "-t") + } + + slice = append(slice, "-p", fmt.Sprintf("%d", c.Port)) + if len(c.Command) > 0 { + slice = append(slice, "--", "/bin/sh", "-e") + if c.Debug { + slice = append(slice, "-x") + } + slice = append(slice, "-c") + + var escapedCommand []string + if c.NoEscapeCommand { + escapedCommand = c.Command + } else { + escapedCommand = []string{} + for _, part := range c.Command { + escapedCommand = append(escapedCommand, fmt.Sprintf("%q", part)) + } + } + slice = append(slice, fmt.Sprintf("%q", strings.Join(escapedCommand, " "))) + } + if runtime.GOOS == "windows" { + slice[len(slice)-1] = slice[len(slice)-1] + " " // Why ? + } + return slice +} + +// String returns a copy-pasteable command, useful for debugging +func (c *Command) String() string { + slice := c.Slice() + for i := range slice { + quoted := fmt.Sprintf("%q", slice[i]) + if strings.Contains(slice[i], " ") || len(quoted) != len(slice[i])+2 { + slice[i] = quoted + } + } + return strings.Join(slice, " ") +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go new file mode 100644 index 000000000..775918d8d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go @@ -0,0 +1,30 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +// Package utils contains logquiet +package utils + +import ( + "fmt" + "os" +) + +// LogQuietStruct is a struct to store information about quiet state +type LogQuietStruct struct { + quiet bool +} + +var instanceQuiet LogQuietStruct + +// Quiet enable or disable quiet +func Quiet(option bool) { + instanceQuiet.quiet = option +} + +// LogQuiet Displays info if quiet is activated +func LogQuiet(str string) { + if !instanceQuiet.quiet { + fmt.Fprintf(os.Stderr, "%s", str) + } +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go new file mode 100644 index 000000000..6f11f4869 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go @@ -0,0 +1,253 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +// scw helpers + +// Package utils contains helpers +package utils + +import ( + "crypto/md5" + "errors" + "fmt" + "io" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "strings" + "time" + + "golang.org/x/crypto/ssh" + + "github.com/Sirupsen/logrus" + log "github.com/Sirupsen/logrus" + "github.com/mattn/go-isatty" + "github.com/moul/gotty-client" + "github.com/scaleway/scaleway-cli/pkg/sshcommand" +) + +// SpawnRedirection is used to redirects the fluxes +type SpawnRedirection struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// SSHExec executes a command over SSH and redirects file-descriptors +func SSHExec(publicIPAddress, privateIPAddress, user string, port int, command []string, checkConnection bool, gateway string, enableSSHKeyForwarding bool) error { + gatewayUser := "root" + gatewayIPAddress := gateway + if strings.Contains(gateway, "@") { + parts := strings.Split(gatewayIPAddress, "@") + if len(parts) != 2 { + return fmt.Errorf("gateway: must be like root@IP") + } + gatewayUser = parts[0] + gatewayIPAddress = parts[1] + gateway = gatewayUser + "@" + gatewayIPAddress + } + + if publicIPAddress == "" && gatewayIPAddress == "" { + return errors.New("server does not have public IP") + } + if privateIPAddress == "" && gatewayIPAddress != "" { + return errors.New("server does not have private IP") + } + + if checkConnection { + useGateway := gatewayIPAddress != "" + if useGateway && !IsTCPPortOpen(fmt.Sprintf("%s:22", gatewayIPAddress)) { + return errors.New("gateway is not available, try again later") + } + if !useGateway && !IsTCPPortOpen(fmt.Sprintf("%s:%d", publicIPAddress, port)) { + return errors.New("server is not ready, try again later") + } + } + + sshCommand := NewSSHExecCmd(publicIPAddress, privateIPAddress, user, port, isatty.IsTerminal(os.Stdin.Fd()), command, gateway, enableSSHKeyForwarding) + + log.Debugf("Executing: %s", sshCommand) + + spawn := exec.Command("ssh", sshCommand.Slice()[1:]...) + spawn.Stdout = os.Stdout + spawn.Stdin = os.Stdin + spawn.Stderr = os.Stderr + return spawn.Run() +} + +// NewSSHExecCmd computes execve compatible arguments to run a command via ssh +func NewSSHExecCmd(publicIPAddress, privateIPAddress, user string, port int, allocateTTY bool, command []string, gatewayIPAddress string, enableSSHKeyForwarding bool) *sshcommand.Command { + quiet := os.Getenv("DEBUG") != "1" + secureExec := os.Getenv("SCW_SECURE_EXEC") == "1" + sshCommand := &sshcommand.Command{ + AllocateTTY: allocateTTY, + Command: command, + Host: publicIPAddress, + Quiet: quiet, + SkipHostKeyChecking: !secureExec, + User: user, + NoEscapeCommand: true, + Port: port, + EnableSSHKeyForwarding: enableSSHKeyForwarding, + } + if gatewayIPAddress != "" { + sshCommand.Host = privateIPAddress + sshCommand.Gateway = &sshcommand.Command{ + Host: gatewayIPAddress, + SkipHostKeyChecking: !secureExec, + AllocateTTY: allocateTTY, + Quiet: quiet, + User: user, + Port: port, + } + } + + return sshCommand +} + +// GeneratingAnSSHKey generates an SSH key +func GeneratingAnSSHKey(cfg SpawnRedirection, path string, name string) (string, error) { + args := []string{ + "-t", + "rsa", + "-b", + "4096", + "-f", + filepath.Join(path, name), + "-N", + "", + "-C", + "", + } + log.Infof("Executing commands %v", args) + spawn := exec.Command("ssh-keygen", args...) 
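+ // args[5] is the generated private key path (filepath.Join(path, name)); it is returned below alongside ssh-keygen's run error.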
+ spawn.Stdout = cfg.Stdout + spawn.Stdin = cfg.Stdin + spawn.Stderr = cfg.Stderr + return args[5], spawn.Run() +} + +// WaitForTCPPortOpen calls IsTCPPortOpen in a loop +func WaitForTCPPortOpen(dest string) error { + for { + if IsTCPPortOpen(dest) { + break + } + time.Sleep(1 * time.Second) + } + return nil +} + +// IsTCPPortOpen returns true if a TCP communication with "host:port" can be initialized +func IsTCPPortOpen(dest string) bool { + conn, err := net.DialTimeout("tcp", dest, time.Duration(2000)*time.Millisecond) + if err == nil { + defer conn.Close() + } + return err == nil +} + +// TruncIf ensures the input string does not exceed max size if cond is met +func TruncIf(str string, max int, cond bool) string { + if cond && len(str) > max { + return str[:max] + } + return str +} + +// Wordify convert complex name to a single word without special shell characters +func Wordify(str string) string { + str = regexp.MustCompile(`[^a-zA-Z0-9-]`).ReplaceAllString(str, "_") + str = regexp.MustCompile(`__+`).ReplaceAllString(str, "_") + str = strings.Trim(str, "_") + return str +} + +// PathToTARPathparts returns the two parts of a unix path +func PathToTARPathparts(fullPath string) (string, string) { + fullPath = strings.TrimRight(fullPath, "/") + return path.Dir(fullPath), path.Base(fullPath) +} + +// RemoveDuplicates transforms an array into a unique array +func RemoveDuplicates(elements []string) []string { + encountered := map[string]bool{} + + // Create a map of all unique elements. + for v := range elements { + encountered[elements[v]] = true + } + + // Place all keys from the map into a slice. + result := []string{} + for key := range encountered { + result = append(result, key) + } + return result +} + +// AttachToSerial tries to connect to server serial using 'gotty-client' and fallback with a help message +func AttachToSerial(serverID, apiToken, url string) (*gottyclient.Client, chan bool, error) { + gottyURL := os.Getenv("SCW_GOTTY_URL") + if gottyURL == "" { + gottyURL = url + } + URL := fmt.Sprintf("%s?arg=%s&arg=%s", gottyURL, apiToken, serverID) + + logrus.Debug("Connection to ", URL) + gottycli, err := gottyclient.NewClient(URL) + if err != nil { + return nil, nil, err + } + + if os.Getenv("SCW_TLSVERIFY") == "0" { + gottycli.SkipTLSVerify = true + } + + gottycli.UseProxyFromEnv = true + + if err = gottycli.Connect(); err != nil { + return nil, nil, err + } + done := make(chan bool) + + fmt.Println("You are connected, type 'Ctrl+q' to quit.") + go func() { + gottycli.Loop() + gottycli.Close() + done <- true + }() + return gottycli, done, nil +} + +func rfc4716hex(data []byte) string { + fingerprint := "" + + for i := 0; i < len(data); i++ { + fingerprint = fmt.Sprintf("%s%0.2x", fingerprint, data[i]) + if i != len(data)-1 { + fingerprint = fingerprint + ":" + } + } + return fingerprint +} + +// SSHGetFingerprint returns the fingerprint of an SSH key +func SSHGetFingerprint(key []byte) (string, error) { + publicKey, comment, _, _, err := ssh.ParseAuthorizedKey(key) + if err != nil { + return "", err + } + switch reflect.TypeOf(publicKey).String() { + case "*ssh.rsaPublicKey", "*ssh.dsaPublicKey", "*ssh.ecdsaPublicKey": + md5sum := md5.Sum(publicKey.Marshal()) + return publicKey.Type() + " " + rfc4716hex(md5sum[:]) + " " + comment, nil + default: + return "", errors.New("Can't handle this key") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 000000000..18379a935 --- 
/dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). 
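+// The returned Terminal starts with local echo enabled and an assumed 80x24 size; call SetSize once the real terminal dimensions are known.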
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
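+// The logical position is translated into a (row, column) pair using the prompt's visual length and the terminal width, and only relative cursor movements are queued.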
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
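+// Runes inside an ANSI escape sequence (from ESC through the terminating letter) are not counted.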
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
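+// When the line buffer is full its capacity is roughly doubled, and the runes after the cursor are shifted right by one to make room.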
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n += 1 + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. 
+func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 000000000..d01919614 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,119 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "syscall" + "unsafe" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 { + return err + } + return nil +} + +// GetSize returns the dimensions of the given terminal. 
+func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 000000000..9c1ffd145 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 000000000..5883b22d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 000000000..799f049f0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 000000000..07eb5edd7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + // see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c + var termio unix.Termio + err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 000000000..e0a1f36ce --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// passwordReader is an io.Reader that reads from a specific Windows HANDLE. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(syscall.Handle(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 000000000..533438d91 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. 
+func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 1ad929935..97a1a27f8 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -232,6 +232,12 @@ "path": "github.com/NaverCloudPlatform/ncloud-sdk-go/sdk", "revision": "c2e73f942591b0f033a3c6df00f44badb2347c38", "revisionTime": "2018-01-10T05:50:12Z" + }, + { + "checksumSHA1": "8dVO3L8yAdQ17X3lAhIziyF3OFk=", + "path": "github.com/Sirupsen/logrus", + "revision": "10f801ebc38b33738c9d17d50860f484a0988ff5", + "revisionTime": "2017-03-17T14:32:14Z" }, { "checksumSHA1": "HttiPj314X1a0i2Jen1p6lRH/vE=", @@ -589,6 +595,12 @@ "path": "github.com/biogo/hts/bgzf", "revision": "50da7d4131a3b5c9d063932461cab4d1fafb20b0" }, + { + "checksumSHA1": "bFj0ceSRvaFFCfmS4el1PjWhcgw=", + "path": "github.com/creack/goselect", + "revision": "1bd5ca702c6154bccc56ecd598932ee8b295cab2", + "revisionTime": "2016-07-14T17:28:59Z" + }, { "checksumSHA1": "Lf3uUXTkKK5DJ37BxQvxO1Fq+K8=", "comment": "v1.0.0-3-g6d21280", @@ -638,6 +650,24 @@ "revision": "4c04abe183f449bd9ede285f0e5c7ee575d0dbe4", "revisionTime": "2017-04-07T15:15:42Z" }, + { + "checksumSHA1": "1n5MBJthemxmfqU2gN3qLCd8s04=", + "path": "github.com/docker/docker/pkg/namesgenerator", + "revision": "fa3e2d5ab9b577cecd24201902bbe72b3f1b851c", + "revisionTime": "2017-04-06T12:40:27Z" + }, + { + "checksumSHA1": "lThih54jzz9A4zHKEFb9SIV3Ed0=", + "path": "github.com/docker/docker/pkg/random", + "revision": "fa3e2d5ab9b577cecd24201902bbe72b3f1b851c", + "revisionTime": "2017-04-06T12:40:27Z" + }, + { + "checksumSHA1": "rhLUtXvcmouYuBwOq9X/nYKzvNg=", + "path": "github.com/dustin/go-humanize", + "revision": "259d2a102b871d17f30e3cd9881a642961a1e486", + "revisionTime": "2017-02-28T07:34:54Z" + }, { "checksumSHA1": "GCskdwYAPW2S34918Z5CgNMJ2Wc=", "path": "github.com/dylanmei/iso8601", @@ -779,6 +809,12 @@ "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", "revisionTime": "2017-06-23T01:44:30Z" }, + { + "checksumSHA1": "xSmii71kfQASGNG2C8ttmHx9KTE=", + "path": "github.com/gorilla/websocket", + "revision": "a91eba7f97777409bc2c443f5534d41dd20c5720", + "revisionTime": "2017-03-19T17:27:27Z" + }, { "checksumSHA1": "izBSRxLAHN+a/XpAku0in05UzlY=", "comment": "20141209094003-92-g95fa852", @@ -1032,6 +1068,18 @@ "path": "github.com/mitchellh/reflectwalk", "revision": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6" }, + { + "checksumSHA1": "KhOVzKefFYORHdIVe+/gNAHB23A=", + "path": "github.com/moul/anonuuid", + "revision": "609b752a95effbbef26d134ac18ed6f57e01b98e", + "revisionTime": "2016-02-22T16:21:17Z" + }, + { + "checksumSHA1": "WcXDSYIAP73RAvy22iD57nE/peI=", + "path": "github.com/moul/gotty-client", + "revision": "99224eea3278d662fce9124bb2bf6c2bb39f5160", + "revisionTime": "2017-02-05T09:54:39Z" + }, { "checksumSHA1": "gcLub3oB+u4QrOJZcYmk/y2AP4k=", "path": 
"github.com/nu7hatch/gouuid", @@ -1117,6 +1165,12 @@ "revision": "7bdb11aecb0e457ea23c86898c6b49bfc0eb4bb1", "revisionTime": "2017-08-01T13:52:49Z" }, + { + "checksumSHA1": "DF3jZEw4lCq/SEaC7DIl/R+7S70=", + "path": "github.com/renstrom/fuzzysearch/fuzzy", + "revision": "2d205ac6ec17a839a94bdbfd16d2fa6c6dada2e0", + "revisionTime": "2016-03-31T20:48:55Z" + }, { "checksumSHA1": "zmC8/3V4ls53DJlNTKDZwPSC/dA=", "path": "github.com/satori/go.uuid", @@ -1129,6 +1183,24 @@ "revision": "5bf94b69c6b68ee1b541973bb8e1144db23a194b", "revisionTime": "2017-03-21T23:07:31Z" }, + { + "checksumSHA1": "FFhSGe3Y3J1laR/6rwSS7U2esrk=", + "path": "github.com/scaleway/scaleway-cli/pkg/api", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, + { + "checksumSHA1": "kveaAmNlnvmIIuEkFcMlB+N7TqY=", + "path": "github.com/scaleway/scaleway-cli/pkg/sshcommand", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, + { + "checksumSHA1": "xM3G5ct9YYYnVIL3XMRrcf41xVw=", + "path": "github.com/scaleway/scaleway-cli/pkg/utils", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, { "checksumSHA1": "iydUphwYqZRq3WhstEdGsbvBAKs=", "comment": "v1.1.4-4-g976c720", @@ -1281,6 +1353,12 @@ "revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8", "revisionTime": "2017-02-08T20:51:15Z" }, + { + "checksumSHA1": "xiderUuvye8Kpn7yX3niiJg32bE=", + "path": "golang.org/x/crypto/ssh/terminal", + "revision": "c2303dcbe84172e0c0da4c9f083eeca54c06f298", + "revisionTime": "2017-01-17T19:20:27Z" + }, { "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", "path": "golang.org/x/net/context", @@ -1342,6 +1420,12 @@ "path": "golang.org/x/oauth2/jwt", "revision": "8a57ed94ffd43444c0879fe75701732a38afc985" }, + { + "checksumSHA1": "S0DP7Pn7sZUmXc55IzZnNvERu6s=", + "path": "golang.org/x/sync/errgroup", + "revision": "5a06fca2c336a4b2b2fcb45702e8c47621b2aa2c", + "revisionTime": "2017-03-17T17:13:11Z" + }, { "checksumSHA1": "NzQ3QYllWwK+3GZliu11jMU6xwo=", "path": "golang.org/x/sys/unix", diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md new file mode 100644 index 000000000..b1864bdb0 --- /dev/null +++ b/website/source/docs/builders/scaleway.html.md @@ -0,0 +1,95 @@ +--- +layout: docs +sidebar_current: docs-builders-scaleway +page_title: Scaleway - Builders +description: |- + The Scaleway Packer builder is able to create new images for use with + Scaleway BareMetal and Virtual cloud server. The builder takes a source + image, runs any provisioning necessary on the image after launching it, then + snapshots it into a reusable image. This reusable image can then be used as + the foundation of new servers that are launched within Scaleway. +--- + + +# Scaleway Builder + +Type: `scaleway` + +The `scaleway` Packer builder is able to create new images for use with +[Scaleway](https://www.scaleway.com). The builder takes a source image, +runs any provisioning necessary on the image after launching it, then snapshots +it into a reusable image. This reusable image can then be used as the foundation +of new servers that are launched within Scaleway. + +The builder does *not* manage snapshots. Once it creates an image, it is up to you +to use it or delete it. + +## Configuration Reference + +There are many configuration options available for the builder. They are +segmented below into two categories: required and optional parameters. 
Within
+each category, the available configuration keys are alphabetized.
+
+In addition to the options listed here, a
+[communicator](/docs/templates/communicator.html) can be configured for this
+builder.
+
+### Required:
+
+- `api_access_key` (string) - The api\_access\_key to use to access your
+  account. It can also be specified via the environment variable
+  `SCALEWAY_API_ACCESS_KEY`. Your access key is available in the
+  ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the
+  control panel.
+
+- `api_token` (string) - The organization TOKEN to use to access your
+  account. It can also be specified via the environment variable
+  `SCALEWAY_API_TOKEN`. Your tokens are available in the ["Credentials"
+  section](https://cloud.scaleway.com/#/credentials) of the control panel.
+
+- `image` (string) - The UUID of the base image to use. This is the image
+  that will be used to launch a new server and provision it. See the
+  Scaleway documentation for the complete list of accepted image UUIDs.
+
+- `region` (string) - The name of the region to launch the server in (`par1`
+  or `ams1`). Consequently, this is the region where the snapshot will be
+  available.
+
+- `commercial_type` (string) - The name of the server commercial type: `C1`,
+  `C2S`, `C2M`, `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`,
+  `X64-30GB`, `X64-60GB`, `X64-120GB`, `ARM64-2GB`, `ARM64-4GB`, `ARM64-8GB`,
+  `ARM64-16GB`, `ARM64-32GB`, `ARM64-64GB`, `ARM64-128GB`
+
+### Optional:
+
+- `server_name` (string) - The name assigned to the server. Defaults to
+  `packer-UUID`.
+
+- `image_name` (string) - The name of the resulting image that will appear in
+  your account. Defaults to `packer-TIMESTAMP`.
+
+- `snapshot_name` (string) - The name of the resulting snapshot that will
+  appear in your account. Defaults to `packer-TIMESTAMP`.
+
+## Basic Example
+
+Here is a basic example. It is completely valid as soon as you enter your own
+access tokens:
+
+```json
+{
+  "type": "scaleway",
+  "api_access_key": "YOUR API ACCESS KEY",
+  "api_token": "YOUR TOKEN",
+  "image": "UUID OF THE BASE IMAGE",
+  "region": "par1",
+  "commercial_type": "X64-2GB",
+  "ssh_username": "root",
+  "ssh_private_key_file": "~/.ssh/id_rsa"
+}
+```
+
+When you do not specify the `ssh_private_key_file`, a temporary SSH keypair
+is generated to connect to the server. This key will only allow the `root`
+user to connect to the server.
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 7f12d326c..5f089e041 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -162,6 +162,9 @@
             > QEMU
+            > Scaleway
             > Triton