From 5dc6b183655d4eabfe4c9fb6dc22a2e9522e2ac4 Mon Sep 17 00:00:00 2001 From: YAMADA Tsuyoshi Date: Thu, 10 Nov 2016 18:50:31 +0900 Subject: [PATCH 001/251] googlecompute-export: use application default credential as same as googlecompute builder --- .../googlecompute-export/post-processor.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/post-processor/googlecompute-export/post-processor.go b/post-processor/googlecompute-export/post-processor.go index f198ab7c8..51a61c9a3 100644 --- a/post-processor/googlecompute-export/post-processor.go +++ b/post-processor/googlecompute-export/post-processor.go @@ -2,7 +2,6 @@ package googlecomputeexport import ( "fmt" - "io/ioutil" "strings" "github.com/mitchellh/multistep" @@ -83,13 +82,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac exporterConfig.CalcTimeout() // Set up credentials and GCE driver. - b, err := ioutil.ReadFile(accountKeyFilePath) - if err != nil { - err = fmt.Errorf("Error fetching account credentials: %s", err) - return nil, p.config.KeepOriginalImage, err + if accountKeyFilePath != "" { + err := googlecompute.ProcessAccountFile(&exporterConfig.Account, accountKeyFilePath) + if err != nil { + return nil, p.config.KeepOriginalImage, err + } } - accountKeyContents := string(b) - googlecompute.ProcessAccountFile(&exporterConfig.Account, accountKeyContents) driver, err := googlecompute.NewDriverGCE(ui, projectId, &exporterConfig.Account) if err != nil { return nil, p.config.KeepOriginalImage, err From fdaf4ed8d33bcdfb19fb1269a4c9d1c63e3cc3a5 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 8 Sep 2017 11:31:19 -0700 Subject: [PATCH 002/251] Gracefully clean up on SIGTERM --- command/build.go | 3 ++- command/push.go | 3 ++- main.go | 2 ++ packer/plugin/server.go | 6 ++++-- packer/ui.go | 2 +- stdin.go | 3 ++- 6 files changed, 13 insertions(+), 6 deletions(-) diff --git a/command/build.go b/command/build.go index de9da51ee..25fc96e71 
100644 --- a/command/build.go +++ b/command/build.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "syscall" "github.com/hashicorp/packer/helper/enumflag" "github.com/hashicorp/packer/packer" @@ -144,7 +145,7 @@ func (c BuildCommand) Run(args []string) int { // Handle interrupts for this build sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) defer signal.Stop(sigCh) go func(b packer.Build) { <-sigCh diff --git a/command/push.go b/command/push.go index e1d6c7db2..723e9d92c 100644 --- a/command/push.go +++ b/command/push.go @@ -8,6 +8,7 @@ import ( "path/filepath" "regexp" "strings" + "syscall" "github.com/hashicorp/atlas-go/archive" "github.com/hashicorp/atlas-go/v1" @@ -267,7 +268,7 @@ func (c *PushCommand) Run(args []string) int { // Make a ctrl-C channel sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) defer signal.Stop(sigCh) err = nil diff --git a/main.go b/main.go index 689780cd4..b5c884eba 100644 --- a/main.go +++ b/main.go @@ -13,6 +13,7 @@ import ( "path/filepath" "runtime" "sync" + "syscall" "time" "github.com/hashicorp/go-uuid" @@ -86,6 +87,7 @@ func realMain() int { wrapConfig.Writer = io.MultiWriter(logTempFile, logWriter) wrapConfig.Stdout = outW wrapConfig.DetectDuration = 500 * time.Millisecond + wrapConfig.ForwardSignals = []os.Signal{syscall.SIGTERM} exitStatus, err := panicwrap.Wrap(&wrapConfig) if err != nil { fmt.Fprintf(os.Stderr, "Couldn't start Packer: %s", err) diff --git a/packer/plugin/server.go b/packer/plugin/server.go index 667907a5f..470daf950 100644 --- a/packer/plugin/server.go +++ b/packer/plugin/server.go @@ -10,7 +10,6 @@ package plugin import ( "errors" "fmt" - packrpc "github.com/hashicorp/packer/packer/rpc" "io/ioutil" "log" "math/rand" @@ -20,7 +19,10 @@ import ( "runtime" "strconv" "sync/atomic" + "syscall" "time" + + packrpc "github.com/hashicorp/packer/packer/rpc" ) // 
This is a count of the number of interrupts the process has received. @@ -87,7 +89,7 @@ func Server() (*packrpc.Server, error) { // Eat the interrupts ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) go func() { var count int32 = 0 for { diff --git a/packer/ui.go b/packer/ui.go index c107c23b8..c16a2eae0 100644 --- a/packer/ui.go +++ b/packer/ui.go @@ -180,7 +180,7 @@ func (rw *BasicUi) Ask(query string) (string, error) { rw.scanner = bufio.NewScanner(rw.Reader) } sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) defer signal.Stop(sigCh) log.Printf("ui: ask: %s", query) diff --git a/stdin.go b/stdin.go index 2df4153f5..758cc6864 100644 --- a/stdin.go +++ b/stdin.go @@ -5,6 +5,7 @@ import ( "log" "os" "os/signal" + "syscall" ) // setupStdin switches out stdin for a pipe. We do this so that we can @@ -26,7 +27,7 @@ func setupStdin() { // Register a signal handler for interrupt in order to close the // writer end of our pipe so that readers get EOF downstream. 
ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) go func() { defer signal.Stop(ch) From 644ac5b367b10d0dd1b82dd84bd3176a804ed739 Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Wed, 20 Sep 2017 22:50:37 -0300 Subject: [PATCH 003/251] enable vsphere-template to work with local builders --- .../vsphere-template/post-processor.go | 17 ++++++--- .../vsphere-template/step_mark_as_template.go | 34 ++++++++++++++--- post-processor/vsphere/artifact.go | 38 +++++++++++++++++++ post-processor/vsphere/post-processor.go | 9 ++++- 4 files changed, 85 insertions(+), 13 deletions(-) create mode 100644 post-processor/vsphere/artifact.go diff --git a/post-processor/vsphere-template/post-processor.go b/post-processor/vsphere-template/post-processor.go index 86c9f54b4..e68e2d21c 100644 --- a/post-processor/vsphere-template/post-processor.go +++ b/post-processor/vsphere-template/post-processor.go @@ -7,16 +7,19 @@ import ( "strings" "time" + "github.com/hashicorp/packer/builder/vmware/iso" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/post-processor/vsphere" "github.com/hashicorp/packer/template/interpolate" "github.com/mitchellh/multistep" "github.com/vmware/govmomi" ) var builtins = map[string]string{ - "mitchellh.vmware-esx": "vmware", + vsphere.BuilderId: "vmware", + iso.BuilderIdESX: "vmware", } type Config struct { @@ -90,11 +93,16 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac source := "" for _, path := range artifact.Files() { - if strings.HasSuffix(path, ".vmx") { + if strings.HasSuffix(path, ".vmx") || strings.HasSuffix(path, ".ovf") || strings.HasSuffix(path, ".ova") { source = path break } } + + if source == "" { + return nil, false, fmt.Errorf("VMX, OVF or OVA file not found") + } + // In some occasions the VM state is powered on and if we immediately try to mark as 
template // (after the ESXi creates it) it will fail. If vSphere is given a few seconds this behavior doesn't reappear. ui.Message("Waiting 10s for VMware vSphere to start") @@ -117,10 +125,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac &stepCreateFolder{ Folder: p.config.Folder, }, - &stepMarkAsTemplate{ - VMName: artifact.Id(), - Source: source, - }, + NewStepMarkAsTemplate(artifact.Id(), source), } runner := common.NewRunnerWithPauseFn(steps, p.config.PackerConfig, ui, state) runner.Run(state) diff --git a/post-processor/vsphere-template/step_mark_as_template.go b/post-processor/vsphere-template/step_mark_as_template.go index 0e5465054..dda3984a6 100644 --- a/post-processor/vsphere-template/step_mark_as_template.go +++ b/post-processor/vsphere-template/step_mark_as_template.go @@ -13,8 +13,30 @@ import ( ) type stepMarkAsTemplate struct { - VMName string - Source string + VMName string + Source string + RemoteFolder string +} + +func NewStepMarkAsTemplate(vmname, source string) *stepMarkAsTemplate { + remoteFolder := "Discovered virtual machine" + + if strings.Contains(vmname, "::") { + local := strings.Split(vmname, "::") + + datastore := local[0] + remoteFolder = local[1] + vmname = local[2] + + source = path.Join("/vmfs/volumes/", datastore, vmname, path.Base(source)) + + } + + return &stepMarkAsTemplate{ + VMName: vmname, + Source: source, + RemoteFolder: remoteFolder, + } } func (s *stepMarkAsTemplate) Run(state multistep.StateBag) multistep.StepAction { @@ -25,7 +47,7 @@ func (s *stepMarkAsTemplate) Run(state multistep.StateBag) multistep.StepAction ui.Message("Marking as a template...") - vm, err := findRuntimeVM(cli, dcPath, s.VMName) + vm, err := findRuntimeVM(cli, dcPath, s.VMName, s.RemoteFolder) if err != nil { state.Put("error", err) ui.Error(err.Error()) @@ -75,10 +97,10 @@ func (s *stepMarkAsTemplate) Run(state multistep.StateBag) multistep.StepAction return multistep.ActionContinue } -// We will use the 
virtual machine created by vmware-iso builder -func findRuntimeVM(cli *govmomi.Client, dcPath, name string) (*object.VirtualMachine, error) { +// We will use the virtual machine created/uploaded by vmware builder (remote or local) +func findRuntimeVM(cli *govmomi.Client, dcPath, name, remoteFolder string) (*object.VirtualMachine, error) { si := object.NewSearchIndex(cli.Client) - fullPath := path.Join(dcPath, "vm", "Discovered virtual machine", name) + fullPath := path.Join(dcPath, "vm", remoteFolder, name) ref, err := si.FindByInventoryPath(context.Background(), fullPath) if err != nil { diff --git a/post-processor/vsphere/artifact.go b/post-processor/vsphere/artifact.go new file mode 100644 index 000000000..d8ef75491 --- /dev/null +++ b/post-processor/vsphere/artifact.go @@ -0,0 +1,38 @@ +package vsphere + +import ( + "fmt" +) + +const BuilderId = "packer.post-processor.vsphere" + +type Artifact struct { + files []string + datastore string + vmfolder string + vmname string +} + +func (*Artifact) BuilderId() string { + return BuilderId +} + +func (a *Artifact) Files() []string { + return a.files +} + +func (a *Artifact) Id() string { + return fmt.Sprintf("%s::%s::%s", a.datastore, a.vmfolder, a.vmname) +} + +func (a *Artifact) String() string { + return fmt.Sprintf("VM: %s Folder: %s Datastore: %s", a.vmname, a.vmfolder, a.datastore) +} + +func (*Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + return nil +} diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index e97147334..ead3b50fd 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -142,7 +142,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Failed: %s\n", err) } - return artifact, false, nil + artifact = &Artifact{ + datastore: p.config.Datastore, + files: artifact.Files(), + vmfolder: 
p.config.VMFolder, + vmname: p.config.VMName, + } + + return artifact, true, nil } func (p *PostProcessor) BuildArgs(source, ovftool_uri string) ([]string, error) { From 24a8fddf035ef2360bd5dd4bb846fcb5507d8233 Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Fri, 22 Sep 2017 13:54:11 -0300 Subject: [PATCH 004/251] showing artifact info in packer UI --- post-processor/vsphere-template/post-processor.go | 1 + post-processor/vsphere/post-processor.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/post-processor/vsphere-template/post-processor.go b/post-processor/vsphere-template/post-processor.go index e68e2d21c..5cec3bda6 100644 --- a/post-processor/vsphere-template/post-processor.go +++ b/post-processor/vsphere-template/post-processor.go @@ -87,6 +87,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + ui.Say(fmt.Sprintf("Artifact %s with id %s and builderId %s", artifact, artifact.Id(), artifact.BuilderId())) if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index ead3b50fd..0a6e4a6a3 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -149,6 +149,8 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac vmname: p.config.VMName, } + ui.Say(fmt.Sprintf("New artifact created %s with id %s and builderId %s", artifact, artifact.Id(), artifact.BuilderId())) + return artifact, true, nil } From 99dd19ccfdec298992f57355804181bc1b7353c2 Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Fri, 22 Sep 2017 23:37:27 -0300 Subject: [PATCH 005/251] Adding correct reference to VM remote path --- post-processor/vsphere-template/post-processor.go | 1 - 
post-processor/vsphere-template/step_mark_as_template.go | 2 +- post-processor/vsphere/post-processor.go | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/post-processor/vsphere-template/post-processor.go b/post-processor/vsphere-template/post-processor.go index 5cec3bda6..e68e2d21c 100644 --- a/post-processor/vsphere-template/post-processor.go +++ b/post-processor/vsphere-template/post-processor.go @@ -87,7 +87,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - ui.Say(fmt.Sprintf("Artifact %s with id %s and builderId %s", artifact, artifact.Id(), artifact.BuilderId())) if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } diff --git a/post-processor/vsphere-template/step_mark_as_template.go b/post-processor/vsphere-template/step_mark_as_template.go index dda3984a6..60780dd0d 100644 --- a/post-processor/vsphere-template/step_mark_as_template.go +++ b/post-processor/vsphere-template/step_mark_as_template.go @@ -28,7 +28,7 @@ func NewStepMarkAsTemplate(vmname, source string) *stepMarkAsTemplate { remoteFolder = local[1] vmname = local[2] - source = path.Join("/vmfs/volumes/", datastore, vmname, path.Base(source)) + source = path.Join("/vmfs/volumes/", datastore, vmname, vmname+path.Ext(source)) } diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index 0a6e4a6a3..ead3b50fd 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -149,8 +149,6 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac vmname: p.config.VMName, } - ui.Say(fmt.Sprintf("New artifact created %s with id %s and builderId %s", artifact, artifact.Id(), artifact.BuilderId())) - return artifact, true, nil } From 
f1773a57f8d624d39bcb84bcdf09f7ebd3179ede Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Sat, 23 Sep 2017 03:01:35 -0300 Subject: [PATCH 006/251] using vmx extension as default --- post-processor/vsphere-template/step_mark_as_template.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/post-processor/vsphere-template/step_mark_as_template.go b/post-processor/vsphere-template/step_mark_as_template.go index 60780dd0d..82b26a7b5 100644 --- a/post-processor/vsphere-template/step_mark_as_template.go +++ b/post-processor/vsphere-template/step_mark_as_template.go @@ -28,7 +28,7 @@ func NewStepMarkAsTemplate(vmname, source string) *stepMarkAsTemplate { remoteFolder = local[1] vmname = local[2] - source = path.Join("/vmfs/volumes/", datastore, vmname, vmname+path.Ext(source)) + source = path.Join("/vmfs/volumes/", datastore, vmname, vmname+".vmx") } From 75a4ca7351899a0018fee74b8ab621b40546628f Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Sat, 23 Sep 2017 15:43:57 -0300 Subject: [PATCH 007/251] adding artifact testing and using builder id --- .../vsphere-template/post-processor.go | 2 +- .../vsphere-template/step_mark_as_template.go | 17 +++++++-------- post-processor/vsphere/artifact.go | 9 ++++++++ post-processor/vsphere/artifact_test.go | 21 +++++++++++++++++++ post-processor/vsphere/post-processor.go | 7 +------ 5 files changed, 40 insertions(+), 16 deletions(-) create mode 100644 post-processor/vsphere/artifact_test.go diff --git a/post-processor/vsphere-template/post-processor.go b/post-processor/vsphere-template/post-processor.go index e68e2d21c..6014180c7 100644 --- a/post-processor/vsphere-template/post-processor.go +++ b/post-processor/vsphere-template/post-processor.go @@ -125,7 +125,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac &stepCreateFolder{ Folder: p.config.Folder, }, - NewStepMarkAsTemplate(artifact.Id(), source), + NewStepMarkAsTemplate(artifact, source), } runner := 
common.NewRunnerWithPauseFn(steps, p.config.PackerConfig, ui, state) runner.Run(state) diff --git a/post-processor/vsphere-template/step_mark_as_template.go b/post-processor/vsphere-template/step_mark_as_template.go index 82b26a7b5..78949d2d9 100644 --- a/post-processor/vsphere-template/step_mark_as_template.go +++ b/post-processor/vsphere-template/step_mark_as_template.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/post-processor/vsphere" "github.com/mitchellh/multistep" "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" @@ -18,18 +19,16 @@ type stepMarkAsTemplate struct { RemoteFolder string } -func NewStepMarkAsTemplate(vmname, source string) *stepMarkAsTemplate { +func NewStepMarkAsTemplate(artifact packer.Artifact, source string) *stepMarkAsTemplate { remoteFolder := "Discovered virtual machine" + vmname := artifact.Id() - if strings.Contains(vmname, "::") { - local := strings.Split(vmname, "::") - - datastore := local[0] - remoteFolder = local[1] - vmname = local[2] - + if artifact.BuilderId() == vsphere.BuilderId { + id := strings.Split(artifact.Id(), "::") + datastore := id[0] + remoteFolder = id[1] + vmname = id[2] source = path.Join("/vmfs/volumes/", datastore, vmname, vmname+".vmx") - } return &stepMarkAsTemplate{ diff --git a/post-processor/vsphere/artifact.go b/post-processor/vsphere/artifact.go index d8ef75491..90e475a28 100644 --- a/post-processor/vsphere/artifact.go +++ b/post-processor/vsphere/artifact.go @@ -13,6 +13,15 @@ type Artifact struct { vmname string } +func NewArtifact(datastore, vmfolder, vmname string, files []string) *Artifact { + return &Artifact{ + files: files, + datastore: datastore, + vmfolder: vmfolder, + vmname: vmname, + } +} + func (*Artifact) BuilderId() string { return BuilderId } diff --git a/post-processor/vsphere/artifact_test.go b/post-processor/vsphere/artifact_test.go new file mode 100644 index 000000000..b4f17c759 --- /dev/null +++ 
b/post-processor/vsphere/artifact_test.go @@ -0,0 +1,21 @@ +package vsphere + +import ( + "github.com/hashicorp/packer/packer" + "testing" +) + +func TestArtifact_ImplementsArtifact(t *testing.T) { + var raw interface{} + raw = &Artifact{} + if _, ok := raw.(packer.Artifact); !ok { + t.Fatalf("Artifact should be a Artifact") + } +} + +func TestArtifact_Id(t *testing.T) { + artifact := NewArtifact("datastore", "vmfolder", "vmname", nil) + if artifact.Id() != "datastore::vmfolder::vmname" { + t.Fatalf("must return datastore, vmfolder and vmname splitted by :: as Id") + } +} diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index ead3b50fd..a022cb23f 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -142,12 +142,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Failed: %s\n", err) } - artifact = &Artifact{ - datastore: p.config.Datastore, - files: artifact.Files(), - vmfolder: p.config.VMFolder, - vmname: p.config.VMName, - } + artifact = NewArtifact(p.config.Datastore, p.config.VMFolder, p.config.VMName, artifact.Files()) return artifact, true, nil } From b3a0e51fe50b0caedc6ff88bfebfb53c6193745c Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Sun, 24 Sep 2017 01:42:28 -0300 Subject: [PATCH 008/251] adding documentation --- .../post-processors/vsphere-template.html.md | 37 +++++++++++++++++-- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/website/source/docs/post-processors/vsphere-template.html.md b/website/source/docs/post-processors/vsphere-template.html.md index e72dc19cc..511224cfd 100644 --- a/website/source/docs/post-processors/vsphere-template.html.md +++ b/website/source/docs/post-processors/vsphere-template.html.md @@ -1,7 +1,7 @@ --- description: | - The Packer vSphere Template post-processor takes an artifact from the VMware-iso builder built on ESXi (i.e. 
remote) - and allows to mark a VM as a template and leaving it in a path of choice. + The Packer vSphere Template post-processor takes an artifact from the VMware-iso builder, built on ESXi (i.e. remote) + or an artifact from the vSphere post-processor and allows to mark a VM as a template and leaving it in a path of choice. layout: docs page_title: 'vSphere Template - Post-Processors' sidebar_current: 'docs-post-processors-vSphere-template' @@ -11,8 +11,9 @@ sidebar_current: 'docs-post-processors-vSphere-template' Type: `vsphere-template` -The Packer vSphere template post-processor takes an artifact from the VMware-iso builder built on ESXi (i.e. remote) and -allows to mark a VM as a template and leaving it in a path of choice. +The Packer vSphere Template post-processor takes an artifact from the VMware-iso builder, built on ESXi (i.e. remote) +or an artifact from the [vSphere](/docs/post-processors/vsphere.html) post-processor and allows to mark a VM as a +template and leaving it in a path of choice. ## Example @@ -51,3 +52,31 @@ Optional: - `folder` (string) - Target path where the template will be created. - `insecure` (boolean) - If it's true skip verification of server certificate. Default is false + +## Using the vSphere Template with local builders + +Once the [vSphere](/docs/post-processors/vsphere.html) takes an artifact from the VMware builder and uploads it +to a vSphere endpoint, you will likely want to mark that VM as template. Packer can do this for you automatically +using a sequence definition (a collection of post-processors that are treated as as single pipeline, see +[Post-Processors](/docs/templates/post-processors.html) for more information): + +``` json +{ + "post-processors": [ + [ + { + "type": "vsphere", + ... + }, + { + "type": "vsphere-template", + ... 
+ } + ] + ] +} +``` + +In the example above, the result of each builder is passed through the defined sequence of post-processors starting +with the `vsphere` post-processor which will upload the artifact to a vSphere endpoint. The resulting artifact is then +passed on to the `vsphere-template` post-processor which handles marking a VM as a template. From 50904064e19053d18fc61c4f3b26bcd3db339f9a Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Sun, 24 Sep 2017 21:56:35 -0300 Subject: [PATCH 009/251] doesn't keep the original artifact --- post-processor/vsphere/post-processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index a022cb23f..d8b40d654 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -144,7 +144,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac artifact = NewArtifact(p.config.Datastore, p.config.VMFolder, p.config.VMName, artifact.Files()) - return artifact, true, nil + return artifact, false, nil } func (p *PostProcessor) BuildArgs(source, ovftool_uri string) ([]string, error) { From 35ef7bce5ca489a6a74a92cb3386f34e637dedc3 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 3 Oct 2017 15:35:03 -0700 Subject: [PATCH 010/251] implement dir check for winrm communicator --- communicator/winrm/communicator.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index d1f7d2ab6..a725fe51f 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -127,6 +127,23 @@ func (c *Communicator) Upload(path string, input io.Reader, _ *os.FileInfo) erro if err != nil { return err } + + // Get information about destination path + endpoint := winrm.NewEndpoint(c.endpoint.Host, c.endpoint.Port, c.config.Https, c.config.Insecure, nil, nil, nil, 
c.config.Timeout) + client, err := winrm.NewClient(endpoint, c.config.Username, c.config.Password) + if err != nil { + return fmt.Errorf("Was unable to create winrm client: %s", err) + } + stdout, _, _, err := client.RunWithString(fmt.Sprintf("powershell -Command \"(Get-Item %s) -is [System.IO.DirectoryInfo]\"", path), "") + if err != nil { + return fmt.Errorf("Couldn't determine whether destination was a folder or file: %s", err) + } + if strings.Contains(stdout, "True") { + // The path exists and is a directory. + // Upload file into the directory instead of overwriting. + path = filepath.Join(path, filepath.Base((*fi).Name())) + } + log.Printf("Uploading file to '%s'", path) return wcp.Write(path, input) } From a5e61348195383154b9f667c45d36d5270bf00cc Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 3 Oct 2017 15:55:32 -0700 Subject: [PATCH 011/251] implement dir check for ssh communicator --- communicator/ssh/communicator.go | 23 +++++++++++++++++++++++ communicator/winrm/communicator.go | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index c10e97dcc..827925f2c 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -533,6 +533,29 @@ func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) e target_dir := filepath.Dir(path) target_file := filepath.Base(path) + // find out if it's a directory + testDirectoryCommand := fmt.Sprintf("if [ -d \"%s\" ]; then echo directory; fi", path) + cmd := &packer.RemoteCmd{Command: testDirectoryCommand} + var buf, buf2 bytes.Buffer + cmd.Stdout = &buf + cmd.Stdout = io.MultiWriter(cmd.Stdout, &buf2) + + err := c.Start(cmd) + + if err != nil { + log.Printf("Unable to check whether remote path is a dir: %s", err) + return err + } + + stdoutToRead := buf2.String() + if strings.Contains(stdoutToRead, "directory") { + log.Printf("upload locale is a directory") + target_dir = path + 
target_file = filepath.Base((*fi).Name()) + } + + log.Printf("target_file was %s", target_file) + // On windows, filepath.Dir uses backslash seperators (ie. "\tmp"). // This does not work when the target host is unix. Switch to forward slash // which works for unix and windows diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index a725fe51f..3970d1af8 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -122,7 +122,7 @@ func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { } // Upload implementation of communicator.Communicator interface -func (c *Communicator) Upload(path string, input io.Reader, _ *os.FileInfo) error { +func (c *Communicator) Upload(path string, input io.Reader, fi *os.FileInfo) error { wcp, err := c.newCopyClient() if err != nil { return err From b3cc90125dde7fbec29d6c9616df4167efe5f968 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 3 Oct 2017 16:36:27 -0700 Subject: [PATCH 012/251] ssh communicator works --- communicator/ssh/communicator.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 827925f2c..3ec2d1aba 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -534,11 +534,8 @@ func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) e target_file := filepath.Base(path) // find out if it's a directory - testDirectoryCommand := fmt.Sprintf("if [ -d \"%s\" ]; then echo directory; fi", path) + testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, path) cmd := &packer.RemoteCmd{Command: testDirectoryCommand} - var buf, buf2 bytes.Buffer - cmd.Stdout = &buf - cmd.Stdout = io.MultiWriter(cmd.Stdout, &buf2) err := c.Start(cmd) @@ -546,10 +543,9 @@ func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) e log.Printf("Unable to check whether remote path is a dir: %s", 
err) return err } - - stdoutToRead := buf2.String() - if strings.Contains(stdoutToRead, "directory") { - log.Printf("upload locale is a directory") + cmd.Wait() + if cmd.ExitStatus == 0 { + log.Printf("path is a directory; copying file into directory.") target_dir = path target_file = filepath.Base((*fi).Name()) } From 8452ca898cbf5dcf3b8c6c0a7db975dffc18fee8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 3 Oct 2017 17:06:33 -0700 Subject: [PATCH 013/251] implemented for docker communicator --- builder/docker/communicator.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 13684b2f8..0c4a8833d 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -104,6 +104,21 @@ func (c *Communicator) uploadReader(dst string, src io.Reader) error { // uploadFile uses docker cp to copy the file from the host to the container func (c *Communicator) uploadFile(dst string, src io.Reader, fi *os.FileInfo) error { + // find out if it's a directory + testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, dst) + cmd := &packer.RemoteCmd{Command: testDirectoryCommand} + + err := c.Start(cmd) + + if err != nil { + log.Printf("Unable to check whether remote path is a dir: %s", err) + return err + } + cmd.Wait() + if cmd.ExitStatus == 0 { + log.Printf("path is a directory; copying file into directory.") + dst = filepath.Join(dst, filepath.Base((*fi).Name())) + } // command format: docker cp /path/to/infile containerid:/path/to/outfile log.Printf("Copying to %s on container %s.", dst, c.ContainerID) From 93bddb3e65101d553e596d975098949129dfba36 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 4 Oct 2017 13:35:15 -0700 Subject: [PATCH 014/251] implement directory fix for lxc file uploads --- builder/lxc/communicator.go | 18 ++++++++++++++++++ communicator/ssh/communicator.go | 2 -- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git 
a/builder/lxc/communicator.go b/builder/lxc/communicator.go index 8d9765979..77ea3c052 100644 --- a/builder/lxc/communicator.go +++ b/builder/lxc/communicator.go @@ -71,6 +71,24 @@ func (c *LxcAttachCommunicator) Upload(dst string, r io.Reader, fi *os.FileInfo) return err } + if fi != nil { + tfDir := filepath.Dir(tf.Name()) + // rename tempfile to match original file name. This makes sure that if file is being + // moved into a directory, the filename is preserved instead of a temp name. + adjustedTempName := filepath.Join(tfDir, (*fi).Name()) + mvCmd, err := c.CmdWrapper(fmt.Sprintf("sudo mv %s %s", tf.Name(), adjustedTempName)) + if err != nil { + return err + } + defer os.Remove(adjustedTempName) + exitStatus := ShellCommand(mvCmd).Run() + // change cpCmd to use new file name as source + cpCmd, err = c.CmdWrapper(fmt.Sprintf("sudo cp %s %s", adjustedTempName, dst)) + if err != nil { + return err + } + } + log.Printf("Running copy command: %s", dst) return ShellCommand(cpCmd).Run() diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 3ec2d1aba..8d5a388d5 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -550,8 +550,6 @@ func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) e target_file = filepath.Base((*fi).Name()) } - log.Printf("target_file was %s", target_file) - // On windows, filepath.Dir uses backslash seperators (ie. "\tmp"). // This does not work when the target host is unix. 
Switch to forward slash // which works for unix and windows From e8cabc1e83b6aa99a678f5555a7218cffc20003c Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 4 Oct 2017 15:23:36 -0700 Subject: [PATCH 015/251] implemented for LXD --- builder/lxc/communicator.go | 2 +- builder/lxd/communicator.go | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/builder/lxc/communicator.go b/builder/lxc/communicator.go index 77ea3c052..bb940851a 100644 --- a/builder/lxc/communicator.go +++ b/builder/lxc/communicator.go @@ -81,7 +81,7 @@ func (c *LxcAttachCommunicator) Upload(dst string, r io.Reader, fi *os.FileInfo) return err } defer os.Remove(adjustedTempName) - exitStatus := ShellCommand(mvCmd).Run() + ShellCommand(mvCmd).Run() // change cpCmd to use new file name as source cpCmd, err = c.CmdWrapper(fmt.Sprintf("sudo cp %s %s", adjustedTempName, dst)) if err != nil { diff --git a/builder/lxd/communicator.go b/builder/lxd/communicator.go index 8eaa47a5f..b59e83f82 100644 --- a/builder/lxd/communicator.go +++ b/builder/lxd/communicator.go @@ -54,7 +54,24 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error { } func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error { - cpCmd, err := c.CmdWrapper(fmt.Sprintf("lxc file push - %s", filepath.Join(c.ContainerName, dst))) + fileDestination := filepath.Join(c.ContainerName, dst) + // find out if the place we are pushing to is a directory + testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, dst) + cmd := &packer.RemoteCmd{Command: testDirectoryCommand} + err := c.Start(cmd) + + if err != nil { + log.Printf("Unable to check whether remote path is a dir: %s", err) + return err + } + cmd.Wait() + + if cmd.ExitStatus == 0 { + log.Printf("path is a directory; copying file into directory.") + fileDestination = filepath.Join(c.ContainerName, dst, (*fi).Name()) + } + + cpCmd, err := c.CmdWrapper(fmt.Sprintf("lxc file push - %s", fileDestination)) if err != nil { return err } 
From a79d5eff4ec3ad22d548a007d44d2bf6ad12cafd Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 5 Oct 2017 10:44:18 -0700 Subject: [PATCH 016/251] implement sftp path --- communicator/ssh/communicator.go | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 8d5a388d5..3f668bb8e 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -377,15 +377,31 @@ func (c *comm) connectToAgent() { func (c *comm) sftpUploadSession(path string, input io.Reader, fi *os.FileInfo) error { sftpFunc := func(client *sftp.Client) error { - return sftpUploadFile(path, input, client, fi) + return c.sftpUploadFile(path, input, client, fi) } return c.sftpSession(sftpFunc) } -func sftpUploadFile(path string, input io.Reader, client *sftp.Client, fi *os.FileInfo) error { +func (c *comm) sftpUploadFile(path string, input io.Reader, client *sftp.Client, fi *os.FileInfo) error { log.Printf("[DEBUG] sftp: uploading %s", path) + // find out if destination is a directory (this is to replicate rsync behavior) + testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, path) + cmd := &packer.RemoteCmd{Command: testDirectoryCommand} + + err := c.Start(cmd) + + if err != nil { + log.Printf("Unable to check whether remote path is a dir: %s", err) + return err + } + cmd.Wait() + if cmd.ExitStatus == 0 { + log.Printf("path is a directory; copying file into directory.") + path = filepath.Join(path, filepath.Base((*fi).Name())) + } + f, err := client.Create(path) if err != nil { return err @@ -436,7 +452,7 @@ func (c *comm) sftpUploadDirSession(dst string, src string, excl []string) error return nil } - return sftpVisitFile(finalDst, path, info, client) + return c.sftpVisitFile(finalDst, path, info, client) } return filepath.Walk(src, walkFunc) @@ -445,7 +461,7 @@ func (c *comm) sftpUploadDirSession(dst string, src string, excl []string) error return 
c.sftpSession(sftpFunc) } -func sftpMkdir(path string, client *sftp.Client, fi os.FileInfo) error { +func (c *comm) sftpMkdir(path string, client *sftp.Client, fi os.FileInfo) error { log.Printf("[DEBUG] sftp: creating dir %s", path) if err := client.Mkdir(path); err != nil { @@ -463,16 +479,16 @@ func sftpMkdir(path string, client *sftp.Client, fi os.FileInfo) error { return nil } -func sftpVisitFile(dst string, src string, fi os.FileInfo, client *sftp.Client) error { +func (c *comm) sftpVisitFile(dst string, src string, fi os.FileInfo, client *sftp.Client) error { if !fi.IsDir() { f, err := os.Open(src) if err != nil { return err } defer f.Close() - return sftpUploadFile(dst, f, client, &fi) + return c.sftpUploadFile(dst, f, client, &fi) } else { - err := sftpMkdir(dst, client, fi) + err := c.sftpMkdir(dst, client, fi) return err } } @@ -533,7 +549,7 @@ func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) e target_dir := filepath.Dir(path) target_file := filepath.Base(path) - // find out if it's a directory + // find out if destination is a directory (this is to replicate rsync behavior) testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, path) cmd := &packer.RemoteCmd{Command: testDirectoryCommand} From 52c0be2d82cf74f29e4d4d5a246001b5e559bcaf Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 5 Oct 2017 16:57:52 -0700 Subject: [PATCH 017/251] fix test --- communicator/winrm/communicator_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/communicator/winrm/communicator_test.go b/communicator/winrm/communicator_test.go index d5eb974ac..3f6b50ae1 100644 --- a/communicator/winrm/communicator_test.go +++ b/communicator/winrm/communicator_test.go @@ -48,6 +48,12 @@ func newMockWinRMServer(t *testing.T) *winrmtest.Remote { func(out, err io.Writer) int { return 0 }) + wrm.CommandFunc( + winrmtest.MatchText(`powershell -Command "(Get-Item C:/Temp/packer.cmd) -is [System.IO.DirectoryInfo]"`), + func(out, err io.Writer) int { + 
out.Write([]byte("False")) + return 0 + }) return wrm } From 5949bc91c444cc9e84a15184ef7ecdef08b84cc9 Mon Sep 17 00:00:00 2001 From: DanHam Date: Wed, 25 Oct 2017 13:50:58 +0100 Subject: [PATCH 018/251] Extend upload and subsequent 'dot sourcing' of env vars to std PS command * Wrap funcs to flatten and upload env vars with new func prepareEnvVars. While the wrapped funcs could be combined, keeping them separate simplifies testing. * Configure/refactor std and elevated PS to use new funcs to prepare, upload and dot source env vars. * Dot sourcing the env vars in this way avoids the need to embed them directly in the command string. This avoids the need to escape the env vars to ensure the command string is correctly parsed. * Characters within the env vars that are special to PS (such as $'s and backticks) will still need to be escaped to allow them to be correctly interpreted by PS. * The std and elevated PS commands now inject env vars into the remote env via the same mechanism. This ensures consistent behaviour across the two command types. 
Fixes #5471 --- provisioner/powershell/provisioner.go | 52 +++++++++++++++++++-------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 4297acf08..3d2f16f85 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -113,7 +113,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.EnvVarFormat == "" { - p.config.EnvVarFormat = `$env:%s=\"%s\"; ` + p.config.EnvVarFormat = `$env:%s="%s"; ` } if p.config.ElevatedEnvVarFormat == "" { @@ -121,7 +121,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` + p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. 
{{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` } if p.config.ElevatedExecuteCommand == "" { @@ -331,6 +331,19 @@ func (p *Provisioner) retryable(f func() error) error { } } +// Enviroment variables required within the remote environment are uploaded within a PS script and +// then enabled by 'dot sourcing' the script immediately prior to execution of the main command +func (p *Provisioner) prepareEnvVars(elevated bool) (envVarPath string, err error) { + // Collate all required env vars into a plain string with required formatting applied + flattenedEnvVars := p.createFlattenedEnvVars(elevated) + // Create a powershell script on the target build fs containing the flattened env vars + envVarPath, err = p.uploadEnvVars(flattenedEnvVars) + if err != nil { + return "", err + } + return +} + func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { flattened = "" envVars := make(map[string]string) @@ -367,6 +380,19 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { return } +func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (envVarPath string, err error) { + // Upload all env vars to a powershell script on the target build file system + envVarReader := strings.NewReader(flattenedEnvVars) + uuid := uuid.TimeOrderedUUID() + envVarPath = fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) + log.Printf("Uploading env vars to %s", envVarPath) + err = p.communicator.Upload(envVarPath, envVarReader, nil) + if err != nil { + return "", fmt.Errorf("Error uploading ps script containing env vars: %s", err) + } + return +} + func (p *Provisioner) createCommandText() (command string, err error) { // Return the interpolated command if p.config.ElevatedUser == "" { @@ -377,12 +403,15 @@ func (p *Provisioner) createCommandText() (command string, err error) { } func (p *Provisioner) createCommandTextNonPrivileged() (command string, err error) { - // Create environment variables to set before executing the 
command - flattenedEnvVars := p.createFlattenedEnvVars(false) + // Prepare everything needed to enable the required env vars within the remote environment + envVarPath, err := p.prepareEnvVars(false) + if err != nil { + return "", err + } p.config.ctx.Data = &ExecuteCommandTemplate{ - Vars: flattenedEnvVars, Path: p.config.RemotePath, + Vars: envVarPath, } command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) @@ -395,17 +424,10 @@ func (p *Provisioner) createCommandTextNonPrivileged() (command string, err erro } func (p *Provisioner) createCommandTextPrivileged() (command string, err error) { - // Can't double escape the env vars, lets create shiny new ones - flattenedEnvVars := p.createFlattenedEnvVars(true) - // Need to create a mini ps1 script containing all of the environment variables we want; - // we'll be dot-sourcing this later - envVarReader := strings.NewReader(flattenedEnvVars) - uuid := uuid.TimeOrderedUUID() - envVarPath := fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) - log.Printf("Uploading env vars to %s", envVarPath) - err = p.communicator.Upload(envVarPath, envVarReader, nil) + // Prepare everything needed to enable the required env vars within the remote environment + envVarPath, err := p.prepareEnvVars(false) if err != nil { - return "", fmt.Errorf("Error preparing elevated powershell script: %s", err) + return "", err } p.config.ctx.Data = &ExecuteCommandTemplate{ From 4b89fc1c009fb7eb46fc530d805d1dcf3511ff33 Mon Sep 17 00:00:00 2001 From: DanHam Date: Wed, 25 Oct 2017 22:47:08 +0100 Subject: [PATCH 019/251] Fix tests post changes. Add test for upload func. 
--- provisioner/powershell/provisioner_test.go | 85 +++++++++++++++------- 1 file changed, 58 insertions(+), 27 deletions(-) diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index e7e64d52e..749564d4d 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -79,8 +79,8 @@ func TestProvisionerPrepare_Defaults(t *testing.T) { t.Error("expected elevated_password to be empty") } - if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` { - t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) + if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` { + t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) } if p.config.ElevatedExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. 
{{.Vars}}; &'{{.Path}}'; exit $LastExitCode }"` { @@ -403,7 +403,7 @@ func TestProvisionerProvision_Inline(t *testing.T) { ui := testUi() p := new(Provisioner) - // Defaults provided by Packer + // Defaults provided by Packer - env vars should not appear in cmd p.config.PackerBuildName = "vmware" p.config.PackerBuilderType = "iso" comm := new(packer.MockCommunicator) @@ -413,11 +413,14 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } + // User supplied env vars should not change things envVars := make([]string, 2) envVars[0] = "FOO=BAR" envVars[1] = "BAR=BAZ" @@ -430,9 +433,11 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - expectedCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd = comm.StartCmd.Command + re = regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) + matched = re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } } @@ -455,11 +460,12 @@ func TestProvisionerProvision_Scripts(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } - } func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { @@ -488,9 +494,11 @@ func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } } @@ -547,11 +555,11 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO==bar"}, // User env var with value starting with equals } expected := []string{ - `$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:BAZ=\"qux\"; $env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"bar=baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"=bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, } p := new(Provisioner) @@ -571,7 +579,6 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { } func TestProvision_createCommandText(t *testing.T) { - config := testConfig() config["remote_path"] = "c:/Windows/Temp/script.ps1" p := new(Provisioner) @@ -586,22 +593,46 @@ func TestProvision_createCommandText(t *testing.T) { // Non-elevated cmd, _ := p.createCommandText() - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; 
&'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - - if cmd != expectedCommand { - t.Fatalf("Expected Non-elevated command: %s, got %s", expectedCommand, cmd) + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. \${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } // Elevated p.config.ElevatedUser = "vagrant" p.config.ElevatedPassword = "vagrant" cmd, _ = p.createCommandText() - matched, _ := regexp.MatchString("powershell -executionpolicy bypass -file \"%TEMP%(.{1})packer-elevated-shell.*", cmd) + re = regexp.MustCompile(`powershell -executionpolicy bypass -file "%TEMP%\\packer-elevated-shell-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1"`) + matched = re.MatchString(cmd) if !matched { t.Fatalf("Got unexpected elevated command: %s", cmd) } } +func TestProvision_uploadEnvVars(t *testing.T) { + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.communicator = comm + + flattenedEnvVars := `$env:PACKER_BUILDER_TYPE="footype"; $env:PACKER_BUILD_NAME="foobuild";` + + envVarPath, err := p.uploadEnvVars(flattenedEnvVars) + if err != nil { + t.Fatalf("Did not expect error: %s", err.Error()) + } + + if comm.UploadCalled != true { + t.Fatalf("Failed to upload env var file") + } + + re := regexp.MustCompile(`\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1`) + matched := re.MatchString(envVarPath) + if !matched { + t.Fatalf("Got unexpected path for env var file: %s", envVarPath) + } +} + func TestProvision_generateElevatedShellRunner(t *testing.T) { // Non-elevated From 3cdc79de6a9fc5c6bf276b3e9f3627eeb0c0c5f5 Mon Sep 17 
00:00:00 2001 From: DanHam Date: Wed, 25 Oct 2017 23:06:01 +0100 Subject: [PATCH 020/251] Update docs to reflect new upload and dot source of env var for std ps cmd --- .../docs/provisioners/powershell.html.md | 50 ++++++++++--------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/website/source/docs/provisioners/powershell.html.md b/website/source/docs/provisioners/powershell.html.md index f085a6e82..a096bf2f5 100644 --- a/website/source/docs/provisioners/powershell.html.md +++ b/website/source/docs/provisioners/powershell.html.md @@ -1,8 +1,8 @@ --- description: | - The shell Packer provisioner provisions machines built by Packer using shell - scripts. Shell provisioning is the easiest way to get software installed and - configured on a machine. + The shell Packer provisioner provisions machines built by Packer using + shell scripts. Shell provisioning is the easiest way to get software + installed and configured on a machine. layout: docs page_title: 'PowerShell - Provisioners' sidebar_current: 'docs-provisioners-powershell' @@ -29,20 +29,21 @@ The example below is fully functional. ## Configuration Reference The reference of available configuration options is listed below. The only -required element is either "inline" or "script". Every other option is optional. +required element is either "inline" or "script". Every other option is +optional. Exactly *one* of the following is required: - `inline` (array of strings) - This is an array of commands to execute. The - commands are concatenated by newlines and turned into a single file, so they - are all executed within the same context. This allows you to change + commands are concatenated by newlines and turned into a single file, so + they are all executed within the same context. This allows you to change directories in one command and use something in the directory in the next and so on. Inline scripts are the easiest way to pull off simple tasks within the machine. 
- `script` (string) - The path to a script to upload and execute in - the machine. This path can be absolute or relative. If it is relative, it is - relative to the working directory when Packer is executed. + the machine. This path can be absolute or relative. If it is relative, it + is relative to the working directory when Packer is executed. - `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. Each script is @@ -51,12 +52,12 @@ Exactly *one* of the following is required: Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary files, - and Packer should therefore not convert Windows line endings to Unix line - endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary + files, and Packer should therefore not convert Windows line endings to Unix + line endings (if there are any). By default this is false. -- `elevated_execute_command` (string) - The command to use to execute the elevated - script. By default this is as follows: +- `elevated_execute_command` (string) - The command to use to execute the + elevated script. By default this is as follows: ``` powershell powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" @@ -65,32 +66,34 @@ Optional parameters: The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the location of a temp file containing the list of `environment_vars`, if configured. + `Vars`, which is the location of a temp file containing the list of + `environment_vars`, if configured. - `environment_vars` (array of strings) - An array of key/value pairs to inject prior to the execute\_command. 
The format should be `key=value`. - Packer injects some environmental variables by default into the environment, - as well, which are covered in the section below. + Packer injects some environmental variables by default into the + environment, as well, which are covered in the section below. - `execute_command` (string) - The command to use to execute the script. By default this is as follows: ``` powershell - powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }" + powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" ``` The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. + `Vars`, which is the location of a temp file containing the list of + `environment_vars`, if configured. - `elevated_user` and `elevated_password` (string) - If specified, the PowerShell script will be run with elevated privileges using the given Windows user. - `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must be a - writable location and any parent directories must already exist. + the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must + be a writable location and any parent directories must already exist. - `start_retry_timeout` (string) - The amount of time to attempt to *start* the remote process. By default this is "5m" or 5 minutes. This setting @@ -111,9 +114,10 @@ commonly useful environmental variables: This is most useful when Packer is making multiple builds and you want to distinguish them slightly from a common provisioning script. 
-- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the - machine that the script is running on. This is useful if you want to run - only certain parts of the script on systems built with certain builders. +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create + the machine that the script is running on. This is useful if you want to + run only certain parts of the script on systems built with certain + builders. - `PACKER_HTTP_ADDR` If using a builder that provides an http server for file transfer (such as hyperv, parallels, qemu, virtualbox, and vmware), this From b754b715191920cc40e2a2e69e20b2f491cf903e Mon Sep 17 00:00:00 2001 From: bugbuilder Date: Fri, 10 Nov 2017 22:57:39 -0300 Subject: [PATCH 021/251] return vsphere artifact to can build template --- .../vsphere-template/step_mark_as_template.go | 13 +++++++------ post-processor/vsphere/post-processor.go | 2 ++ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/post-processor/vsphere-template/step_mark_as_template.go b/post-processor/vsphere-template/step_mark_as_template.go index 8f95ba4d6..2ffe46956 100644 --- a/post-processor/vsphere-template/step_mark_as_template.go +++ b/post-processor/vsphere-template/step_mark_as_template.go @@ -27,6 +27,7 @@ func NewStepMarkAsTemplate(artifact packer.Artifact) *stepMarkAsTemplate { if artifact.BuilderId() == vsphere.BuilderId { id := strings.Split(artifact.Id(), "::") remoteFolder = id[1] + vmname = id[2] } return &stepMarkAsTemplate{ @@ -50,12 +51,6 @@ func (s *stepMarkAsTemplate) Run(state multistep.StateBag) multistep.StepAction return multistep.ActionHalt } - if err := unregisterPreviousVM(cli, folder, s.VMName); err != nil { - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - dsPath, err := datastorePath(vm) if err != nil { state.Put("error", err) @@ -76,6 +71,12 @@ func (s *stepMarkAsTemplate) Run(state multistep.StateBag) multistep.StepAction return multistep.ActionHalt 
} + if err := unregisterPreviousVM(cli, folder, s.VMName); err != nil { + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + task, err := folder.RegisterVM(context.Background(), dsPath.String(), s.VMName, true, nil, host) if err != nil { state.Put("error", err) diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index c028cd087..b9702fe82 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -140,6 +140,8 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(p.filterLog(out.String())) + artifact = NewArtifact(p.config.Datastore, p.config.VMFolder, p.config.VMName, artifact.Files()) + return artifact, false, nil } From 4acc98a729e2545cb7285fbf262f6268ac162b25 Mon Sep 17 00:00:00 2001 From: Andrew Pennebaker Date: Thu, 7 Dec 2017 23:15:56 -0600 Subject: [PATCH 022/251] add super key (vmware builder) --- .../vmware/common/step_type_boot_command.go | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index 28db3c72e..b100b609e 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -183,6 +183,8 @@ func vncSendString(c *vnc.ClientConn, original string) { special[""] = 0xFFEA special[""] = 0xFFE4 special[""] = 0xFFE2 + special[""] = 0xFFEB + special[""] = 0xFFEC shiftedChars := "~!@#$%^&*()_+{}|:\"<>?" 
@@ -231,6 +233,17 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if strings.HasPrefix(original, "") { + keyCode = special[""] + original = original[len(""):] + log.Printf("Special code '' found, replacing with: %d", keyCode) + + c.KeyEvent(keyCode, true) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { keyCode = special[""] original = original[len(""):] @@ -264,6 +277,17 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if strings.HasPrefix(original, "") { + keyCode = special[""] + original = original[len(""):] + log.Printf("Special code '' found, replacing with: %d", keyCode) + + c.KeyEvent(keyCode, false) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { keyCode = special[""] original = original[len(""):] @@ -297,6 +321,17 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if strings.HasPrefix(original, "") { + keyCode = special[""] + original = original[len(""):] + log.Printf("Special code '' found, replacing with: %d", keyCode) + + c.KeyEvent(keyCode, true) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { keyCode = special[""] original = original[len(""):] @@ -330,6 +365,17 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if strings.HasPrefix(original, "") { + keyCode = special[""] + original = original[len(""):] + log.Printf("Special code '' found, replacing with: %d", keyCode) + + c.KeyEvent(keyCode, false) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { log.Printf("Special code '' found, sleeping one second") time.Sleep(1 * time.Second) From d069dc5b7c18e39ac332ab12566cdf801b14bd32 Mon Sep 17 00:00:00 2001 From: Andrew Pennebaker Date: Sat, 9 Dec 2017 12:02:20 -0600 Subject: [PATCH 023/251] handle holding a-z keys, such as for boot options (vmware builder) --- .../vmware/common/step_type_boot_command.go | 42 +++++++++++++++++++ 1 file 
changed, 42 insertions(+) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index b100b609e..520ed4320 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -5,6 +5,7 @@ import ( "log" "net" "os" + "regexp" "runtime" "strings" "time" @@ -195,6 +196,9 @@ func vncSendString(c *vnc.ClientConn, original string) { keyInterval = delay } + azOnRegex := regexp.MustCompile("^<(?P[a-zA-Z])On>") + azOffRegex := regexp.MustCompile("^<(?P[a-zA-Z])Off>") + // TODO(mitchellh): Ripe for optimizations of some point, perhaps. for len(original) > 0 { var keyCode uint32 @@ -244,6 +248,25 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if azOnRegex.MatchString(original) { + m := azOnRegex.FindStringSubmatch(original) + r, _ := utf8.DecodeRuneInString(m[1]) + original = original[len(""):] + keyCode = uint32(r) + keyShift = unicode.IsUpper(r) || strings.ContainsRune(shiftedChars, r) + + log.Printf("Special code '%s' found, replacing with %d, shift %v", m[0], keyCode, keyShift) + + if keyShift { + c.KeyEvent(KeyLeftShift, true) + } + + c.KeyEvent(keyCode, true) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { keyCode = special[""] original = original[len(""):] @@ -288,6 +311,25 @@ func vncSendString(c *vnc.ClientConn, original string) { continue } + if azOffRegex.MatchString(original) { + m := azOffRegex.FindStringSubmatch(original) + r, _ := utf8.DecodeRuneInString(m[1]) + original = original[len(""):] + keyCode = uint32(r) + keyShift = unicode.IsUpper(r) || strings.ContainsRune(shiftedChars, r) + + log.Printf("Special code '%s' found, replacing with %d, shift %v", m[0], keyCode, keyShift) + + if keyShift { + c.KeyEvent(KeyLeftShift, false) + } + + c.KeyEvent(keyCode, false) + time.Sleep(keyInterval) + + continue + } + if strings.HasPrefix(original, "") { keyCode = special[""] original = original[len(""):] 
From 54f059d3d4006cc1481709d8485ff14c67c03f43 Mon Sep 17 00:00:00 2001 From: Vijaya Bhaskar Reddy Kondreddi Date: Fri, 13 Oct 2017 00:46:24 +0530 Subject: [PATCH 024/251] Add support for skip export --- builder/hyperv/common/step_create_vm.go | 8 +++++ builder/hyperv/common/step_export_vm.go | 46 +++++++++++++------------ builder/hyperv/iso/builder.go | 5 +++ builder/hyperv/vmcx/builder.go | 3 ++ 4 files changed, 40 insertions(+), 22 deletions(-) diff --git a/builder/hyperv/common/step_create_vm.go b/builder/hyperv/common/step_create_vm.go index 02171d9c6..852880911 100644 --- a/builder/hyperv/common/step_create_vm.go +++ b/builder/hyperv/common/step_create_vm.go @@ -28,6 +28,8 @@ type StepCreateVM struct { EnableVirtualizationExtensions bool AdditionalDiskSize []uint DifferencingDisk bool + SkipExport bool + OutputDir string } func (s *StepCreateVM) Run(state multistep.StateBag) multistep.StepAction { @@ -51,6 +53,12 @@ func (s *StepCreateVM) Run(state multistep.StateBag) multistep.StepAction { } vhdPath := state.Get("packerVhdTempDir").(string) + + // inline vhd path if export is skipped + if s.SkipExport { + vhdPath = filepath.Join(s.OutputDir, "Virtual Hard Disks") + } + // convert the MB to bytes ramSize := int64(s.RamSize * 1024 * 1024) diskSize := int64(s.DiskSize * 1024 * 1024) diff --git a/builder/hyperv/common/step_export_vm.go b/builder/hyperv/common/step_export_vm.go index 8c32052a3..a9c2097a5 100644 --- a/builder/hyperv/common/step_export_vm.go +++ b/builder/hyperv/common/step_export_vm.go @@ -17,18 +17,19 @@ const ( type StepExportVm struct { OutputDir string SkipCompaction bool + SkipExport bool } func (s *StepExportVm) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) - var err error var errorMsg string vmName := state.Get("vmName").(string) tmpPath := state.Get("packerTempDir").(string) outputPath := s.OutputDir + expPath := s.OutputDir // create temp path to export vm 
errorMsg = "Error creating temp export path: %s" @@ -39,21 +40,21 @@ func (s *StepExportVm) Run(state multistep.StateBag) multistep.StepAction { ui.Error(err.Error()) return multistep.ActionHalt } + if !s.SkipExport { + ui.Say("Exporting vm...") - ui.Say("Exporting vm...") - - err = driver.ExportVirtualMachine(vmName, vmExportPath) - if err != nil { - errorMsg = "Error exporting vm: %s" - err := fmt.Errorf(errorMsg, err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt + err = driver.ExportVirtualMachine(vmName, vmExportPath) + if err != nil { + errorMsg = "Error exporting vm: %s" + err := fmt.Errorf(errorMsg, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + // copy to output dir + expPath = filepath.Join(vmExportPath, vmName) } - // copy to output dir - expPath := filepath.Join(vmExportPath, vmName) - if s.SkipCompaction { ui.Say("Skipping disk compaction...") } else { @@ -68,16 +69,17 @@ func (s *StepExportVm) Run(state multistep.StateBag) multistep.StepAction { } } - ui.Say("Copying to output dir...") - err = driver.CopyExportedVirtualMachine(expPath, outputPath, vhdDir, vmDir) - if err != nil { - errorMsg = "Error exporting vm: %s" - err := fmt.Errorf(errorMsg, err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt + if !s.SkipExport { + ui.Say("Copying to output dir...") + err = driver.CopyExportedVirtualMachine(expPath, outputPath, vhdDir, vmDir) + if err != nil { + errorMsg = "Error exporting vm: %s" + err := fmt.Errorf(errorMsg, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } } - return multistep.ActionContinue } diff --git a/builder/hyperv/iso/builder.go b/builder/hyperv/iso/builder.go index 849cbcacf..35ed2c698 100644 --- a/builder/hyperv/iso/builder.go +++ b/builder/hyperv/iso/builder.go @@ -94,6 +94,8 @@ type Config struct { SkipCompaction bool `mapstructure:"skip_compaction"` + SkipExport bool 
`mapstructure:"skip_export"` + // Use differencing disk DifferencingDisk bool `mapstructure:"differencing_disk"` @@ -357,6 +359,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnableVirtualizationExtensions: b.config.EnableVirtualizationExtensions, AdditionalDiskSize: b.config.AdditionalDiskSize, DifferencingDisk: b.config.DifferencingDisk, + SkipExport: b.config.SkipExport, + OutputDir: b.config.OutputDir, }, &hypervcommon.StepEnableIntegrationService{}, @@ -422,6 +426,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &hypervcommon.StepExportVm{ OutputDir: b.config.OutputDir, SkipCompaction: b.config.SkipCompaction, + SkipExport: b.config.SkipExport, }, // the clean up actions for each step will be executed reverse order diff --git a/builder/hyperv/vmcx/builder.go b/builder/hyperv/vmcx/builder.go index 6a976b3df..51997a384 100644 --- a/builder/hyperv/vmcx/builder.go +++ b/builder/hyperv/vmcx/builder.go @@ -94,6 +94,8 @@ type Config struct { SkipCompaction bool `mapstructure:"skip_compaction"` + SkipExport bool `mapstructure:"skip_export"` + ctx interpolate.Context } @@ -469,6 +471,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &hypervcommon.StepExportVm{ OutputDir: b.config.OutputDir, SkipCompaction: b.config.SkipCompaction, + SkipExport: b.config.SkipExport, }, // the clean up actions for each step will be executed reverse order From 5346583df96f06b888b6a2a69a652c61605751df Mon Sep 17 00:00:00 2001 From: Vijaya Bhaskar Reddy Kondreddi Date: Tue, 28 Nov 2017 14:56:58 +0530 Subject: [PATCH 025/251] Update docs --- website/source/docs/builders/hyperv-iso.html.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/docs/builders/hyperv-iso.html.md b/website/source/docs/builders/hyperv-iso.html.md index ff267392c..24fee6f05 100644 --- a/website/source/docs/builders/hyperv-iso.html.md +++ b/website/source/docs/builders/hyperv-iso.html.md @@ 
-97,6 +97,12 @@ can be configured for this builder. - `disk_size` (number) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40 GB. +- `differencing_disk` (boolean) - If true enables differencing disks. Only the changes will be written to the new disk. This is especially useful if your + source is a vhd/vhdx. This defaults to false. + +- `skip_export` (boolean) - If true skips VM export. If you are interested only in the vhd/vhdx files, you can enable this option. This will create + inline disks which improves the build performance. There will not be any copying of source vhds to temp directory. This defaults to false. + - `enable_dynamic_memory` (boolean) - If true enable dynamic memory for virtual machine. This defaults to false. From 893d0334f183804efdbe984fb4015377bee41857 Mon Sep 17 00:00:00 2001 From: frankdannhauer Date: Thu, 14 Dec 2017 14:00:52 +0100 Subject: [PATCH 026/251] Fix #5335 https://github.com/hashicorp/packer/issues/5335 --- provisioner/ansible-local/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index ec1eec041..e007bfb85 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -304,7 +304,7 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err playbook := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) inventory := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.InventoryFile))) - extraArgs := fmt.Sprintf(" --extra-vars \"packer_build_name=%s packer_builder_type=%s packer_http_addr=%s\" ", + extraArgs := fmt.Sprintf(" --extra-vars \\\"packer_build_name=%s packer_builder_type=%s packer_http_addr=%s\\\" ", p.config.PackerBuildName, p.config.PackerBuilderType, common.GetHTTPAddr()) if len(p.config.ExtraArguments) > 0 { extraArgs = extraArgs +
strings.Join(p.config.ExtraArguments, " ") From c338cb79d05629a5b6462ebcd122003d90e95ea6 Mon Sep 17 00:00:00 2001 From: Ben Phegan Date: Fri, 15 Dec 2017 13:24:15 +1100 Subject: [PATCH 027/251] Initial commit of feature to allow MAC address specification for HyperV builders --- builder/hyperv/common/driver.go | 2 ++ builder/hyperv/common/driver_mock.go | 12 ++++++++++++ builder/hyperv/common/driver_ps_4.go | 4 ++++ builder/hyperv/common/step_clone_vm.go | 11 +++++++++++ builder/hyperv/common/step_create_vm.go | 11 +++++++++++ builder/hyperv/iso/builder.go | 1 + builder/hyperv/vmcx/builder.go | 2 ++ common/powershell/hyperv/hyperv.go | 12 ++++++++++++ website/source/docs/builders/hyperv-iso.html.md | 6 +++++- website/source/docs/builders/hyperv-vmcx.html.md | 4 ++++ 10 files changed, 64 insertions(+), 1 deletion(-) diff --git a/builder/hyperv/common/driver.go b/builder/hyperv/common/driver.go index 2a2f0b810..571214acd 100644 --- a/builder/hyperv/common/driver.go +++ b/builder/hyperv/common/driver.go @@ -52,6 +52,8 @@ type Driver interface { //Set the vlan to use for machine SetVirtualMachineVlanId(string, string) error + SetVmNetworkAdapterMacAddress(string, string) error + UntagVirtualMachineNetworkAdapterVlan(string, string) error CreateExternalVirtualSwitch(string, string) error diff --git a/builder/hyperv/common/driver_mock.go b/builder/hyperv/common/driver_mock.go index 3823cbd12..7d8e02c6a 100644 --- a/builder/hyperv/common/driver_mock.go +++ b/builder/hyperv/common/driver_mock.go @@ -67,6 +67,11 @@ type DriverMock struct { SetNetworkAdapterVlanId_VlanId string SetNetworkAdapterVlanId_Err error + SetVmNetworkAdapterMacAddress_Called bool + SetVmNetworkAdapterMacAddress_VmName string + SetVmNetworkAdapterMacAddress_Mac string + SetVmNetworkAdapterMacAddress_Err error + SetVirtualMachineVlanId_Called bool SetVirtualMachineVlanId_VmName string SetVirtualMachineVlanId_VlanId string @@ -318,6 +323,13 @@ func (d *DriverMock) SetNetworkAdapterVlanId(switchName 
string, vlanId string) e return d.SetNetworkAdapterVlanId_Err } +func (d *DriverMock) SetVmNetworkAdapterMacAddress(vmName string, mac string) error { + d.SetVmNetworkAdapterMacAddress_Called = true + d.SetVmNetworkAdapterMacAddress_VmName = vmName + d.SetVmNetworkAdapterMacAddress_Mac = mac + return d.SetVmNetworkAdapterMacAddress_Err +} + func (d *DriverMock) SetVirtualMachineVlanId(vmName string, vlanId string) error { d.SetVirtualMachineVlanId_Called = true d.SetVirtualMachineVlanId_VmName = vmName diff --git a/builder/hyperv/common/driver_ps_4.go b/builder/hyperv/common/driver_ps_4.go index 853aa7021..a6c1b7352 100644 --- a/builder/hyperv/common/driver_ps_4.go +++ b/builder/hyperv/common/driver_ps_4.go @@ -146,6 +146,10 @@ func (d *HypervPS4Driver) SetVirtualMachineVlanId(vmName string, vlanId string) return hyperv.SetVirtualMachineVlanId(vmName, vlanId) } +func (d *HypervPS4Driver) SetVmNetworkAdapterMacAddress(vmName string, mac string) error { + return hyperv.SetVmNetworkAdapterMacAddress(vmName, mac) +} + func (d *HypervPS4Driver) UntagVirtualMachineNetworkAdapterVlan(vmName string, switchName string) error { return hyperv.UntagVirtualMachineNetworkAdapterVlan(vmName, switchName) } diff --git a/builder/hyperv/common/step_clone_vm.go b/builder/hyperv/common/step_clone_vm.go index 4a5b55566..24d731b02 100644 --- a/builder/hyperv/common/step_clone_vm.go +++ b/builder/hyperv/common/step_clone_vm.go @@ -27,6 +27,7 @@ type StepCloneVM struct { EnableDynamicMemory bool EnableSecureBoot bool EnableVirtualizationExtensions bool + MacAddress string } func (s *StepCloneVM) Run(state multistep.StateBag) multistep.StepAction { @@ -117,6 +118,16 @@ func (s *StepCloneVM) Run(state multistep.StateBag) multistep.StepAction { } } + if s.MacAddress != "" { + err = driver.SetVmNetworkAdapterMacAddress(s.VMName, s.MacAddress) + if err != nil { + err := fmt.Errorf("Error setting MAC address: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return 
multistep.ActionHalt + } + } + // Set the final name in the state bag so others can use it state.Put("vmName", s.VMName) diff --git a/builder/hyperv/common/step_create_vm.go b/builder/hyperv/common/step_create_vm.go index 02171d9c6..92fdbcb04 100644 --- a/builder/hyperv/common/step_create_vm.go +++ b/builder/hyperv/common/step_create_vm.go @@ -28,6 +28,7 @@ type StepCreateVM struct { EnableVirtualizationExtensions bool AdditionalDiskSize []uint DifferencingDisk bool + MacAddress string } func (s *StepCreateVM) Run(state multistep.StateBag) multistep.StepAction { @@ -124,6 +125,16 @@ func (s *StepCreateVM) Run(state multistep.StateBag) multistep.StepAction { } } + if s.MacAddress != "" { + err = driver.SetVmNetworkAdapterMacAddress(s.VMName, s.MacAddress) + if err != nil { + err := fmt.Errorf("Error setting MAC address: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + // Set the final name in the state bag so others can use it state.Put("vmName", s.VMName) diff --git a/builder/hyperv/iso/builder.go b/builder/hyperv/iso/builder.go index 849cbcacf..fd27cf94b 100644 --- a/builder/hyperv/iso/builder.go +++ b/builder/hyperv/iso/builder.go @@ -74,6 +74,7 @@ type Config struct { BootCommand []string `mapstructure:"boot_command"` SwitchName string `mapstructure:"switch_name"` SwitchVlanId string `mapstructure:"switch_vlan_id"` + MacAddress string `mapstructure:"mac_address"` VlanId string `mapstructure:"vlan_id"` Cpu uint `mapstructure:"cpu"` Generation uint `mapstructure:"generation"` diff --git a/builder/hyperv/vmcx/builder.go b/builder/hyperv/vmcx/builder.go index 6a976b3df..ae0e4c61e 100644 --- a/builder/hyperv/vmcx/builder.go +++ b/builder/hyperv/vmcx/builder.go @@ -82,6 +82,7 @@ type Config struct { BootCommand []string `mapstructure:"boot_command"` SwitchName string `mapstructure:"switch_name"` SwitchVlanId string `mapstructure:"switch_vlan_id"` + MacAddress string `mapstructure:"mac_address"` VlanId string 
`mapstructure:"vlan_id"` Cpu uint `mapstructure:"cpu"` Generation uint @@ -403,6 +404,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnableDynamicMemory: b.config.EnableDynamicMemory, EnableSecureBoot: b.config.EnableSecureBoot, EnableVirtualizationExtensions: b.config.EnableVirtualizationExtensions, + MacAddress: b.config.MacAddress, }, &hypervcommon.StepEnableIntegrationService{}, diff --git a/common/powershell/hyperv/hyperv.go b/common/powershell/hyperv/hyperv.go index 25073d238..d26ec5eeb 100644 --- a/common/powershell/hyperv/hyperv.go +++ b/common/powershell/hyperv/hyperv.go @@ -319,6 +319,18 @@ Copy-Item $cloneFromVmxcPath $exportPath -Recurse -Force return err } +func SetVmNetworkAdapterMacAddress(vmName string, mac string) error { + var script = ` +param([string]$vmName, [string]$mac) +Set-VMNetworkAdapter $vmName -staticmacaddress $mac + ` + + var ps powershell.PowerShellCmd + err := ps.Run(script, vmName, mac) + + return err +} + func ImportVmxcVirtualMachine(importPath string, vmName string, harddrivePath string, ram int64, switchName string) error { var script = ` param([string]$importPath, [string]$vmName, [string]$harddrivePath, [long]$memoryStartupBytes, [string]$switchName) diff --git a/website/source/docs/builders/hyperv-iso.html.md b/website/source/docs/builders/hyperv-iso.html.md index ff267392c..41e603129 100644 --- a/website/source/docs/builders/hyperv-iso.html.md +++ b/website/source/docs/builders/hyperv-iso.html.md @@ -204,7 +204,7 @@ can be configured for this builder. By default none is set. If none is set then a vlan is not set on the switch's network card. If this value is set it should match the vlan specified in by `vlan_id`. -* `vhd_temp_path` (string) - A separate path to be used for storing the VM's +- `vhd_temp_path` (string) - A separate path to be used for storing the VM's disk image. 
The purpose is to enable reading and writing to take place on different physical disks (read from VHD temp path, write to regular temp path while exporting the VM) to eliminate a single-disk bottleneck. @@ -213,6 +213,10 @@ can be configured for this builder. for the new virtual machine. By default none is set. If none is set then vlans are not set on the virtual machine's network card. +- `mac_address` (string) - This allows a specific MAC address to be used on the + default virtual network card. The MAC address must be a string with no + delimeters, for example "0000deadbeef". + - `vm_name` (string) - This is the name of the virtual machine for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. diff --git a/website/source/docs/builders/hyperv-vmcx.html.md b/website/source/docs/builders/hyperv-vmcx.html.md index 1aac29fc1..a7afe93ce 100644 --- a/website/source/docs/builders/hyperv-vmcx.html.md +++ b/website/source/docs/builders/hyperv-vmcx.html.md @@ -225,6 +225,10 @@ can be configured for this builder. for the new virtual machine. By default none is set. If none is set then vlans are not set on the virtual machine's network card. +- `mac_address` (string) - This allows a specific MAC address to be used on the + default virtual network card. The MAC address must be a string with no + delimeters, for example "0000deadbeef". + - `vm_name` (string) - This is the name of the virtual machine for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. From 93c2f2ec93b5569752109ef5a0ff575a76d32a67 Mon Sep 17 00:00:00 2001 From: Malet Date: Thu, 21 Dec 2017 17:24:04 +0000 Subject: [PATCH 028/251] Improve vmware-iso disk_type_id documentation Referring to the vmware documentation might be more "correct", but having the options listed will make the available options much clearer. 
--- website/source/docs/builders/vmware-iso.html.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/vmware-iso.html.md b/website/source/docs/builders/vmware-iso.html.md index 343a685ef..fd56a475f 100644 --- a/website/source/docs/builders/vmware-iso.html.md +++ b/website/source/docs/builders/vmware-iso.html.md @@ -110,10 +110,16 @@ builder. - `disk_type_id` (string) - The type of VMware virtual disk to create. The default is "1", which corresponds to a growable virtual disk split in 2GB files. For ESXi, this defaults to "zeroedthick". This option is for - advanced usage. For more information, please consult the [Virtual Disk - Manager User's Guide](https://www.vmware.com/pdf/VirtualDiskManager.pdf) - for desktop VMware clients. For ESXi, refer to the proper ESXi - documentation. + advanced usage. For ESXi the available options are: `zeroedthick`, `eagerzeroedthick`, `thin`, `rdm:dev`, `rdmp:dev`, `2gbsparse`. For desktop VMware clients: + + Type ID | Description + --- | --- + `0` | Growable virtual disk contained in a single file (monolithic sparse). + `1` | Growable virtual disk split into 2GB files (split sparse). + `2` | Preallocated virtual disk contained in a single file (monolithic flat). + `3` | Preallocated virtual disk split into 2GB files (split flat). + `4` | Preallocated virtual disk compatible with ESX server (VMFS flat). + `5` | Compressed disk optimized for streaming. * `disable_vnc` (boolean) - Whether to create a VNC connection or not. A `boot_command` cannot be used when this is `false`. Defaults to `false`. From e47bd659865bb82b02fa992476e487eabedbf768 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sat, 23 Dec 2017 00:13:17 -0600 Subject: [PATCH 029/251] Add vagrant post-processor support for Google Add the ability to create Google vagrant boxes using the vagrant post-processor. The Google plugin for vagrant is linked below. 
https://github.com/mitchellh/vagrant-google --- post-processor/vagrant/google.go | 42 +++++++++++++++++++ post-processor/vagrant/post-processor.go | 3 ++ .../docs/post-processors/vagrant.html.md | 5 ++- 3 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 post-processor/vagrant/google.go diff --git a/post-processor/vagrant/google.go b/post-processor/vagrant/google.go new file mode 100644 index 000000000..a4d9dbf48 --- /dev/null +++ b/post-processor/vagrant/google.go @@ -0,0 +1,42 @@ +package vagrant + +import ( + "bytes" + "text/template" + + "github.com/hashicorp/packer/packer" +) + +type googleVagrantfileTemplate struct { + Image string "" +} + +type GoogleProvider struct{} + +func (p *GoogleProvider) KeepInputArtifact() bool { + return true +} + +func (p *GoogleProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { + // Create the metadata + metadata = map[string]interface{}{"provider": "google"} + + // Build up the template data to build our Vagrantfile + tplData := &googleVagrantfileTemplate{} + tplData.Image = artifact.Id() + + // Build up the Vagrantfile + var contents bytes.Buffer + t := template.Must(template.New("vf").Parse(defaultGoogleVagrantfile)) + err = t.Execute(&contents, tplData) + vagrantfile = contents.String() + return +} + +var defaultGoogleVagrantfile = ` +Vagrant.configure("2") do |config| + config.vm.provider :google do |google| + google.image = "{{ .Image }}" + end +end +` diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index aa65b2292..310270809 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -25,6 +25,7 @@ var builtins = map[string]string{ "mitchellh.vmware": "vmware", "mitchellh.vmware-esx": "vmware", "pearkes.digitalocean": "digitalocean", + "packer.googlecompute": "google", "packer.parallels": "parallels", "MSOpenTech.hyperv": 
"hyperv", "transcend.qemu": "libvirt", @@ -232,6 +233,8 @@ func providerForName(name string) Provider { return new(HypervProvider) case "libvirt": return new(LibVirtProvider) + case "google": + return new(GoogleProvider) default: return nil } diff --git a/website/source/docs/post-processors/vagrant.html.md b/website/source/docs/post-processors/vagrant.html.md index 5f0a9bc50..8feabbbbf 100644 --- a/website/source/docs/post-processors/vagrant.html.md +++ b/website/source/docs/post-processors/vagrant.html.md @@ -32,6 +32,7 @@ providers. - AWS - DigitalOcean +- Google - Hyper-V - Parallels - QEMU @@ -100,8 +101,8 @@ Specify overrides within the `override` configuration by provider name: In the example above, the compression level will be set to 1 except for VMware, where it will be set to 0. -The available provider names are: `aws`, `digitalocean`, `virtualbox`, `vmware`, -and `parallels`. +The available provider names are: `aws`, `digitalocean`, `google`, `virtualbox`, +`vmware`, and `parallels`. 
## Input Artifacts From e8bac9f4c8f52650827e9406e3bf2a8b89dffdb4 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sat, 23 Dec 2017 23:46:30 -0600 Subject: [PATCH 030/251] Add unit tests for new vagrant Google post-processor --- post-processor/vagrant/google_test.go | 36 +++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 post-processor/vagrant/google_test.go diff --git a/post-processor/vagrant/google_test.go b/post-processor/vagrant/google_test.go new file mode 100644 index 000000000..a66be8539 --- /dev/null +++ b/post-processor/vagrant/google_test.go @@ -0,0 +1,36 @@ +package vagrant + +import ( + "github.com/hashicorp/packer/packer" + "strings" + "testing" +) + +func TestGoogleProvider_impl(t *testing.T) { + var _ Provider = new(GoogleProvider) +} + +func TestGoogleProvider_KeepInputArtifact(t *testing.T) { + p := new(GoogleProvider) + + if !p.KeepInputArtifact() { + t.Fatal("should keep input artifact") + } +} + +func TestGoogleProvider_ArtifactId(t *testing.T) { + p := new(GoogleProvider) + ui := testUi() + artifact := &packer.MockArtifact{ + IdValue: "packer-1234", + } + + vagrantfile, _, err := p.Process(ui, artifact, "foo") + if err != nil { + t.Fatalf("should not have error: %s", err) + } + result := `google.image = "packer-1234"` + if !strings.Contains(vagrantfile, result) { + t.Fatalf("wrong substitution: %s", vagrantfile) + } +} From 7250c4f7f05945e2bb25f6e7e6bd0dd0e81b2028 Mon Sep 17 00:00:00 2001 From: Glenn McDonald Date: Fri, 29 Dec 2017 11:51:57 +1100 Subject: [PATCH 031/251] Add Intel HAXM support to QEMU builder --- builder/qemu/builder.go | 3 ++- website/source/docs/builders/qemu.html.md | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 43ec6b4c3..d2cda5d98 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -25,6 +25,7 @@ var accels = map[string]struct{}{ "kvm": {}, "tcg": {}, "xen": {}, + "hax": {}, } var netDevice = 
map[string]bool{ @@ -259,7 +260,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { if _, ok := accels[b.config.Accelerator]; !ok { errs = packer.MultiErrorAppend( - errs, errors.New("invalid accelerator, only 'kvm', 'tcg', 'xen', or 'none' are allowed")) + errs, errors.New("invalid accelerator, only 'kvm', 'tcg', 'xen', 'hax', or 'none' are allowed")) } if _, ok := netDevice[b.config.NetDevice]; !ok { diff --git a/website/source/docs/builders/qemu.html.md b/website/source/docs/builders/qemu.html.md index f3de7cf9c..e57cc15bb 100644 --- a/website/source/docs/builders/qemu.html.md +++ b/website/source/docs/builders/qemu.html.md @@ -110,9 +110,9 @@ Linux server and have not enabled X11 forwarding (`ssh -X`). ### Optional: - `accelerator` (string) - The accelerator type to use when running the VM. - This may be `none`, `kvm`, `tcg`, or `xen`. The appropriate software must - already been installed on your build machine to use the accelerator you - specified. When no accelerator is specified, Packer will try to use `kvm` + This may be `none`, `kvm`, `tcg`, `hax`, or `xen`. The appropriate software + must have already been installed on your build machine to use the accelerator + you specified. When no accelerator is specified, Packer will try to use `kvm` if it is available but will default to `tcg` otherwise. - `boot_command` (array of strings) - This is an array of commands to type From c5bcb97d06e2d66b9b433ae1bb18f3c832d67173 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 4 Jan 2018 15:04:07 -0800 Subject: [PATCH 032/251] "borrow" access config code from terraform. 
This gives us a few benefits: * timeout early if metadata service can't be reached * report which auth provider we're using * give much better errors if something goes wrong --- builder/amazon/common/access_config.go | 99 +++++++++++++++++++++----- 1 file changed, 81 insertions(+), 18 deletions(-) diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index b569d0314..e3875d26c 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -1,13 +1,16 @@ package common import ( + "errors" "fmt" "log" "os" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/hashicorp/go-cleanhttp" @@ -16,15 +19,16 @@ import ( // AccessConfig is for common configuration related to AWS access type AccessConfig struct { - AccessKey string `mapstructure:"access_key"` - CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"` - MFACode string `mapstructure:"mfa_code"` - ProfileName string `mapstructure:"profile"` - RawRegion string `mapstructure:"region"` - SecretKey string `mapstructure:"secret_key"` - SkipValidation bool `mapstructure:"skip_region_validation"` - Token string `mapstructure:"token"` - session *session.Session + AccessKey string `mapstructure:"access_key"` + CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"` + MFACode string `mapstructure:"mfa_code"` + ProfileName string `mapstructure:"profile"` + RawRegion string `mapstructure:"region"` + SecretKey string `mapstructure:"secret_key"` + SkipValidation bool `mapstructure:"skip_region_validation"` + SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"` + Token string `mapstructure:"token"` + session *session.Session } // Config returns a valid aws.Config object for access to AWS services, or @@ 
-34,14 +38,78 @@ func (c *AccessConfig) Session() (*session.Session, error) { return c.session, nil } - config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true) + // build a chain provider, lazy-evaluated by aws-sdk + providers := []credentials.Provider{ + &credentials.StaticProvider{Value: credentials.Value{ + AccessKeyID: c.AccessKey, + SecretAccessKey: c.SecretKey, + SessionToken: c.Token, + }}, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{ + Filename: "", + Profile: c.ProfileName, + }, + } - if c.ProfileName != "" { - if err := os.Setenv("AWS_PROFILE", c.ProfileName); err != nil { - return nil, fmt.Errorf("Set env error: %s", err) + // Build isolated HTTP client to avoid issues with globally-shared settings + client := cleanhttp.DefaultClient() + + // Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments + client.Timeout = 100 * time.Millisecond + + const userTimeoutEnvVar = "AWS_METADATA_TIMEOUT" + userTimeout := os.Getenv(userTimeoutEnvVar) + if userTimeout != "" { + newTimeout, err := time.ParseDuration(userTimeout) + if err == nil { + if newTimeout.Nanoseconds() > 0 { + client.Timeout = newTimeout + } else { + log.Printf("[WARN] Non-positive value of %s (%s) is meaningless, ignoring", userTimeoutEnvVar, newTimeout.String()) + } + } else { + log.Printf("[WARN] Error converting %s to time.Duration: %s", userTimeoutEnvVar, err) } } + log.Printf("[INFO] Setting AWS metadata API timeout to %s", client.Timeout.String()) + cfg := &aws.Config{ + HTTPClient: client, + } + if !c.SkipMetadataApiCheck { + // Real AWS should reply to a simple metadata request. 
+ // We check it actually does to ensure something else didn't just + // happen to be listening on the same IP:Port + metadataClient := ec2metadata.New(session.New(cfg)) + if metadataClient.Available() { + providers = append(providers, &ec2rolecreds.EC2RoleProvider{ + Client: metadataClient, + }) + log.Print("[INFO] AWS EC2 instance detected via default metadata" + + " API endpoint, EC2RoleProvider added to the auth chain") + } else { + log.Printf("[INFO] Ignoring AWS metadata API endpoint " + + "as it doesn't return any instance-id") + } + } + + creds := credentials.NewChainCredentials(providers) + cp, err := creds.Get() + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { + return nil, errors.New("No valid credential sources found for AWS Builder. " + + "Please see https://www.packer.io/docs/builders/amazon.html#specifying-amazon-credentials " + + "for more information on providing credentials for the AWS Builder.") + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) + + config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true) + config = config.WithCredentials(creds) + if c.RawRegion != "" { config = config.WithRegion(c.RawRegion) } else if region := c.metadataRegion(); region != "" { @@ -52,11 +120,6 @@ func (c *AccessConfig) Session() (*session.Session, error) { config = config.WithEndpoint(c.CustomEndpointEc2) } - if c.AccessKey != "" { - config = config.WithCredentials( - credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)) - } - opts := session.Options{ SharedConfigState: session.SharedConfigEnable, Config: *config, From 281dd1258a16e9bc8d5a765fcbe30fd433a02df9 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sun, 1 Nov 2015 21:46:14 -0600 Subject: [PATCH 033/251] Added proper support for downloading via a Windows UNC path or a relative uri. 
Added proper support for validating a downloadableURL containing a UNC or relative uri. Removed the workaround for an earlier Go issue that had remained dormant in common/download.go (issue #5927). When building a .vmx file via the vmware-iso builder, transform the path to the correct os-formatted one (using filepath.FromSlash). --- builder/vmware/iso/step_create_vmx.go | 5 ++ common/config.go | 94 ++++++++++++++++++--------- common/download.go | 49 +++++++++++++- packer/core.go | 2 +- 4 files changed, 115 insertions(+), 35 deletions(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 1f4ee812f..62546d212 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -43,6 +43,11 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) + // Convert the iso_path into a path relative to the .vmx file if possible + if relativeIsoPath,err := filepath.Rel(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { + isoPath = relativeIsoPath + } + ui.Say("Building and writing VMX file") vmxTemplate := DefaultVMXTemplate diff --git a/common/config.go b/common/config.go index 7bef253ee..7b01191a8 100644 --- a/common/config.go +++ b/common/config.go @@ -56,77 +56,109 @@ func DownloadableURL(original string) (string, error) { // for more info about valid windows URIs idx := strings.Index(original, ":") if idx == 1 { - original = "file:///" + original + original = "file://" + filepath.ToSlash(original) } } - u, err := url.Parse(original) + + // XXX: The validation here is later re-parsed in common/download.go and + // thus any modifications here must remain consistent over there too. 
+ uri, err := url.Parse(original) if err != nil { return "", err } - if u.Scheme == "" { - u.Scheme = "file" + if uri.Scheme == "" { + uri.Scheme = "file" } - if u.Scheme == "file" { - // Windows file handling is all sorts of tricky... + const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) + if uri.Scheme == "file" { + var ospath string // os-formatted pathname if runtime.GOOS == "windows" { - // If the path is using Windows-style slashes, URL parses - // it into the host field. - if u.Path == "" && strings.Contains(u.Host, `\`) { - u.Path = u.Host - u.Host = "" + // Move any extra path components that were mis-parsed into the Host + // field back into the uri.Path field + if len(uri.Host) >= len(UNCPrefix) && uri.Host[:len(UNCPrefix)] == UNCPrefix { + idx := strings.Index(uri.Host[len(UNCPrefix):], string(os.PathSeparator)) + if idx > -1 { + uri.Path = filepath.ToSlash(uri.Host[idx+len(UNCPrefix):]) + uri.Path + uri.Host = uri.Host[:idx+len(UNCPrefix)] + } } + // Now all we need to do to convert the uri to a platform-specific path + // is to trade it's slashes for some os.PathSeparator ones. + ospath = uri.Host + filepath.FromSlash(uri.Path) + + } else { + // Since we're already using sane paths on a sane platform, anything in + // uri.Host can be assumed that the user is describing a relative uri. + // This means that if we concatenate it with uri.Path, the filepath + // transform will still open the file correctly. + // i.e. file://localdirectory/filename -> localdirectory/filename + ospath = uri.Host + uri.Path } // Only do the filepath transformations if the file appears - // to actually exist. - if _, err := os.Stat(u.Path); err == nil { - u.Path, err = filepath.Abs(u.Path) + // to actually exist. We don't do it on windows, because EvalSymlinks + // won't understand how to handle UNC paths and other Windows-specific minutae. 
+ if _, err := os.Stat(ospath); err == nil && runtime.GOOS != "windows" { + ospath, err = filepath.Abs(ospath) if err != nil { return "", err } - u.Path, err = filepath.EvalSymlinks(u.Path) + ospath, err = filepath.EvalSymlinks(ospath) if err != nil { return "", err } - u.Path = filepath.Clean(u.Path) + ospath = filepath.Clean(ospath) } + // now that ospath was normalized and such.. if runtime.GOOS == "windows" { - // Also replace all backslashes with forwardslashes since Windows - // users are likely to do this but the URL should actually only - // contain forward slashes. - u.Path = strings.Replace(u.Path, `\`, `/`, -1) - // prepend absolute windows paths with "/" so that when we - // compose u.String() below the outcome will be correct - // file:///c/blah syntax; otherwise u.String() will only add - // file:// which is not technically a correct windows URI - if filepath.IsAbs(u.Path) && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path + uri.Host = "" + // Check to see if our ospath is unc-prefixed, and if it is then split + // the UNC host into uri.Host, leaving the rest in ospath. + // This way, our UNC-uri is protected from injury in the call to uri.String() + if len(ospath) >= len(UNCPrefix) && ospath[:len(UNCPrefix)] == UNCPrefix { + idx := strings.Index(ospath[len(UNCPrefix):], string(os.PathSeparator)) + if idx > -1 { + uri.Host = ospath[:len(UNCPrefix)+idx] + ospath = ospath[len(UNCPrefix)+idx:] + } } - + // Restore the uri by re-transforming our os-formatted path + uri.Path = filepath.ToSlash(ospath) + } else { + uri.Host = "" + uri.Path = filepath.ToSlash(ospath) } } // Make sure it is lowercased - u.Scheme = strings.ToLower(u.Scheme) + uri.Scheme = strings.ToLower(uri.Scheme) // Verify that the scheme is something we support in our common downloader. 
supported := []string{"file", "http", "https"} found := false for _, s := range supported { - if u.Scheme == s { + if uri.Scheme == s { found = true break } } if !found { - return "", fmt.Errorf("Unsupported URL scheme: %s", u.Scheme) + return "", fmt.Errorf("Unsupported URL scheme: %s", uri.Scheme) } - return u.String(), nil + + // explicit check to see if we need to manually replace the uri host with a UNC one + if runtime.GOOS == "windows" && uri.Scheme == "file" { + if len(uri.Host) >= len(UNCPrefix) && uri.Host[:len(UNCPrefix)] == UNCPrefix { + escapedHost := url.QueryEscape(uri.Host) + return strings.Replace(uri.String(), escapedHost, uri.Host, 1), nil + } + } + return uri.String(), nil } // FileExistsLocally takes the URL output from DownloadableURL, and determines diff --git a/common/download.go b/common/download.go index ada4c9756..5f9940056 100644 --- a/common/download.go +++ b/common/download.go @@ -16,6 +16,9 @@ import ( "net/url" "os" "runtime" + "path" + "path/filepath" + "strings" ) // DownloadConfig is the configuration given to instantiate a new @@ -130,9 +133,49 @@ func (d *DownloadClient) Get() (string, error) { // locally and we don't make a copy. Normally we would copy or download. log.Printf("[DEBUG] Using local file: %s", finalPath) - // Remove forward slash on absolute Windows file URLs before processing - if runtime.GOOS == "windows" && len(finalPath) > 0 && finalPath[0] == '/' { - finalPath = finalPath[1:] + // FIXME: + // cwd should point to the path relative to client.json, but + // since this isn't exposed to us anywhere, we use os.Getwd() + // to figure it out. 
+ cwd,err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Unable to get working directory") + } + + // convert the actual file uri to a windowsy path + // (this logic must correspond to the same logic in common/config.go) + if runtime.GOOS == "windows" { + const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) + + // move any extra path components that were parsed into Host due + // to UNC into the url.Path field so that it's PathSeparators get + // normalized + if len(url.Host) >= len(UNCPrefix) && url.Host[:len(UNCPrefix)] == UNCPrefix { + idx := strings.Index(url.Host[len(UNCPrefix):], string(os.PathSeparator)) + if idx > -1 { + url.Path = filepath.ToSlash(url.Host[idx+len(UNCPrefix):]) + url.Path + url.Host = url.Host[:idx+len(UNCPrefix)] + } + } + + // clean up backward-slashes since they only matter when part of a unc path + urlPath := filepath.ToSlash(url.Path) + + // semi-absolute path (current drive letter) -- file:///absolute/path + if url.Host == "" && len(urlPath) > 0 && urlPath[0] == '/' { + finalPath = path.Join(filepath.VolumeName(cwd), urlPath) + + // relative path -- file://./relative/path + // file://relative/path + } else if url.Host == "" || (len(url.Host) > 0 && url.Host[0] == '.') { + finalPath = path.Join(filepath.ToSlash(cwd), urlPath) + + // absolute path + } else { + // UNC -- file://\\host/share/whatever + // drive -- file://c:/absolute/path + finalPath = path.Join(url.Host, urlPath) + } } // Keep track of the source so we can make sure not to delete this later diff --git a/packer/core.go b/packer/core.go index 7da3e4510..c8ac2cfb7 100644 --- a/packer/core.go +++ b/packer/core.go @@ -67,7 +67,7 @@ func NewCore(c *CoreConfig) (*Core, error) { return nil, err } - // Go through and interpolate all the build names. We shuld be able + // Go through and interpolate all the build names. We should be able // to do this at this point with the variables. 
result.builds = make(map[string]*template.Builder) for _, b := range c.Template.Builders { From da9c94b345a5266b4534200a78a398473fa77377 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Mon, 18 Jan 2016 17:36:47 -0600 Subject: [PATCH 034/251] Added some testcases for the various file uri transforms to download_test.go Moved some of the code for normalizing a Windows file uri to a regular path into its own function NormalizeWindowsURL --- common/download.go | 88 +++++++++++++++++---------- common/download_test.go | 105 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 151 insertions(+), 42 deletions(-) diff --git a/common/download.go b/common/download.go index 5f9940056..65d72d22a 100644 --- a/common/download.go +++ b/common/download.go @@ -101,6 +101,44 @@ func (d *DownloadClient) Cancel() { // TODO(mitchellh): Implement } +// Take a uri and convert it to a path that makes sense on the Windows platform +func NormalizeWindowsURL(basepath string, url url.URL) string { + // This logic must correspond to the same logic in the NormalizeWindowsURL + // function found in common/config.go since that function _also_ checks that + // the url actually exists in file form.
+ + const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) + + // move any extra path components that were parsed into Host due + // to UNC into the url.Path field so that it's PathSeparators get + // normalized + if len(url.Host) >= len(UNCPrefix) && url.Host[:len(UNCPrefix)] == UNCPrefix { + idx := strings.Index(url.Host[len(UNCPrefix):], string(os.PathSeparator)) + if idx > -1 { + url.Path = filepath.ToSlash(url.Host[idx+len(UNCPrefix):]) + url.Path + url.Host = url.Host[:idx+len(UNCPrefix)] + } + } + + // clean up backward-slashes since they only matter when part of a unc path + urlPath := filepath.ToSlash(url.Path) + + // semi-absolute path (current drive letter) -- file:///absolute/path + if url.Host == "" && len(urlPath) > 0 && urlPath[0] == '/' { + return path.Join(filepath.VolumeName(basepath), urlPath) + + // relative path -- file://./relative/path + // file://relative/path + } else if url.Host == "" || (len(url.Host) > 0 && url.Host[0] == '.') { + return path.Join(filepath.ToSlash(basepath), urlPath) + } + + // absolute path + // UNC -- file://\\host/share/whatever + // drive -- file://c:/absolute/path + return path.Join(url.Host, urlPath) +} + func (d *DownloadClient) Get() (string, error) { // If we already have the file and it matches, then just return the target path. if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify { @@ -133,49 +171,17 @@ func (d *DownloadClient) Get() (string, error) { // locally and we don't make a copy. Normally we would copy or download. log.Printf("[DEBUG] Using local file: %s", finalPath) - // FIXME: - // cwd should point to the path relative to client.json, but - // since this isn't exposed to us anywhere, we use os.Getwd() - // to figure it out. 
- cwd,err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Unable to get working directory") - } - - // convert the actual file uri to a windowsy path - // (this logic must correspond to the same logic in common/config.go) + // transform the actual file uri to a windowsy path if we're being windowsy. if runtime.GOOS == "windows" { - const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) - - // move any extra path components that were parsed into Host due - // to UNC into the url.Path field so that it's PathSeparators get - // normalized - if len(url.Host) >= len(UNCPrefix) && url.Host[:len(UNCPrefix)] == UNCPrefix { - idx := strings.Index(url.Host[len(UNCPrefix):], string(os.PathSeparator)) - if idx > -1 { - url.Path = filepath.ToSlash(url.Host[idx+len(UNCPrefix):]) + url.Path - url.Host = url.Host[:idx+len(UNCPrefix)] - } - } - - // clean up backward-slashes since they only matter when part of a unc path - urlPath := filepath.ToSlash(url.Path) - - // semi-absolute path (current drive letter) -- file:///absolute/path - if url.Host == "" && len(urlPath) > 0 && urlPath[0] == '/' { - finalPath = path.Join(filepath.VolumeName(cwd), urlPath) - - // relative path -- file://./relative/path - // file://relative/path - } else if url.Host == "" || (len(url.Host) > 0 && url.Host[0] == '.') { - finalPath = path.Join(filepath.ToSlash(cwd), urlPath) - - // absolute path - } else { - // UNC -- file://\\host/share/whatever - // drive -- file://c:/absolute/path - finalPath = path.Join(url.Host, urlPath) + // FIXME: cwd should point to a path relative to the TEMPLATE path, + // but since this isn't exposed to us anywhere, we use os.Getwd() + // and assume the user ran packer in the same directory that + // any relative files are located at. 
+ cwd,err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Unable to get working directory") } + finalPath = NormalizeWindowsURL(cwd, *url) } // Keep track of the source so we can make sure not to delete this later diff --git a/common/download_test.go b/common/download_test.go index 497a05649..b46d81455 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -9,6 +9,8 @@ import ( "net/http/httptest" "os" "runtime" + "strings" + "path/filepath" "testing" ) @@ -382,5 +384,106 @@ func TestDownloadFileUrl(t *testing.T) { if _, err = os.Stat(sourcePath); err != nil { t.Errorf("Could not stat source file: %s", sourcePath) } - +} + +// SimulateFileUriDownload is a simple utility function that converts a uri +// into a testable file path whilst ignoring a correct checksum match, stripping +// UNC path info, and then calling stat to ensure the correct file exists. +// (used by TestFileUriTransforms) +func SimulateFileUriDownload(t *testing.T, uri string) (string,error) { + // source_path is a file path and source is a network path + source := fmt.Sprintf(uri) + t.Logf("Trying to download %s", source) + + config := &DownloadConfig{ + Url: source, + // This should be wrong. 
We want to make sure we don't delete + Checksum: []byte("nope"), + Hash: HashForType("sha256"), + CopyFile: false, + } + + // go go go + client := NewDownloadClient(config) + path, err := client.Get() + + // ignore any non-important checksum errors if it's not a unc path + if !strings.HasPrefix(path, "\\\\") && err.Error() != "checksums didn't match expected: 6e6f7065" { + t.Fatalf("Unexpected failure; expected checksum not to match") + } + + // if it's a unc path, then remove the host and share name so we don't have + // to force the user to enable ADMIN$ and Windows File Sharing + if strings.HasPrefix(path, "\\\\") { + res := strings.SplitN(path, "/", 3) + path = "/" + res[2] + } + + if _, err = os.Stat(path); err != nil { + t.Errorf("Could not stat source file: %s", path) + } + return path,err +} + +// TestFileUriTransforms tests the case where we use a local file uri +// for iso_url. There's a few different formats that a file uri can exist as +// and so we try to test the most useful and common ones. 
+func TestFileUriTransforms(t *testing.T) { + const testpath = /* have your */ "test-fixtures/fileurl/cake" /* and eat it too */ + const host = "localhost" + + var cwd string + var volume string + var share string + + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("Unable to detect working directory: %s", err) + return + } + cwd = filepath.ToSlash(cwd) + volume = filepath.VolumeName(cwd) + share = volume + if share[len(share)-1] == ':' { + share = share[:len(share)-1] + "$" + } + cwd = cwd[len(volume):] + + t.Logf("TestFileUriTransforms : Running with cwd : '%s'", cwd) + t.Logf("TestFileUriTransforms : Running with volume : '%s'", volume) + + // ./relative/path -> ./relative/path + // /absolute/path -> /absolute/path + // c:/windows/absolute -> c:/windows/absolute + // \\host/sharename/file -> \\host/sharename/file + testcases := []string{ + "./%s", + cwd + "/%s", + volume + cwd + "/%s", + "\\\\" + host + "/" + share + "/" + cwd[1:] + "/%s", + } + + // all regular slashed testcases + for _,testcase := range testcases { + uri := "file://" + fmt.Sprintf(testcase, testpath) + t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) + res,err := SimulateFileUriDownload(t, uri) + if err != nil { + t.Errorf("Unable to transform uri '%s' into a path : %v", uri, err) + } + t.Errorf("TestFileUriTransforms : Result Path '%s'", res) + } + + // ...and finally the oddball windows native path + // \\host\sharename\file -> \\host/sharename/file + testpath_native := filepath.FromSlash(testpath) + testcase_native := "\\\\" + host + "\\" + share + "\\" + filepath.FromSlash(cwd[1:]) + "\\%s" + uri := "file://" + fmt.Sprintf(testcase_native, testpath_native) + t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) + res,err := SimulateFileUriDownload(t, uri) + if err != nil { + t.Errorf("Unable to transform uri '%s' into a path", uri) + return + } + t.Errorf("TestFileUriTransforms : Result Path '%s'", res) } From 60831801a72d5cd1932dbac44d9a36513387866f Mon Sep 17 00:00:00 2001 
From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 13:11:30 -0500 Subject: [PATCH 035/251] Added the file, ftp, and smb downloaders to common/download.go --- common/download.go | 353 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 313 insertions(+), 40 deletions(-) diff --git a/common/download.go b/common/download.go index 65d72d22a..b0550927f 100644 --- a/common/download.go +++ b/common/download.go @@ -12,7 +12,6 @@ import ( "hash" "io" "log" - "net/http" "net/url" "os" "runtime" @@ -21,6 +20,13 @@ import ( "strings" ) +import ( + "net/http" + "github.com/jlaffeye/ftp" + "bufio" +) + + // DownloadConfig is the configuration given to instantiate a new // download instance. Once a configuration is used to instantiate // a download client, it must not be modified. @@ -78,10 +84,15 @@ func HashForType(t string) hash.Hash { // NewDownloadClient returns a new DownloadClient for the given // configuration. func NewDownloadClient(c *DownloadConfig) *DownloadClient { + const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */ + if c.DownloaderMap == nil { c.DownloaderMap = map[string]Downloader{ + "file": &FileDownloader{bufferSize: nil}, + "ftp": &FTPDownloader{userInfo: url.Userinfo{username:"anonymous", password: "anonymous@"}, mtu: mtu}, "http": &HTTPDownloader{userAgent: c.UserAgent}, "https": &HTTPDownloader{userAgent: c.UserAgent}, + "smb": &SMBDownloader{bufferSize: nil} } } @@ -101,44 +112,6 @@ func (d *DownloadClient) Cancel() { // TODO(mitchellh): Implement } -// Take a uri and convert it to a path that makes sense on the Windows platform -func NormalizeWindowsURL(basepath string, url url.URL) string { - // This logic must correspond to the same logic in the NormalizeWindowsURL - // function found in common/config.go since that function _also_ checks that - // the url actually exists in file form. 
- - const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) - - // move any extra path components that were parsed into Host due - // to UNC into the url.Path field so that it's PathSeparators get - // normalized - if len(url.Host) >= len(UNCPrefix) && url.Host[:len(UNCPrefix)] == UNCPrefix { - idx := strings.Index(url.Host[len(UNCPrefix):], string(os.PathSeparator)) - if idx > -1 { - url.Path = filepath.ToSlash(url.Host[idx+len(UNCPrefix):]) + url.Path - url.Host = url.Host[:idx+len(UNCPrefix)] - } - } - - // clean up backward-slashes since they only matter when part of a unc path - urlPath := filepath.ToSlash(url.Path) - - // semi-absolute path (current drive letter) -- file:///absolute/path - if url.Host == "" && len(urlPath) > 0 && urlPath[0] == '/' { - return path.Join(filepath.VolumeName(basepath), urlPath) - - // relative path -- file://./relative/path - // file://relative/path - } else if url.Host == "" || (len(url.Host) > 0 && url.Host[0] == '.') { - return path.Join(filepath.ToSlash(basepath), urlPath) - } - - // absolute path - // UNC -- file://\\host/share/whatever - // drive -- file://c:/absolute/path - return path.Join(url.Host, urlPath) -} - func (d *DownloadClient) Get() (string, error) { // If we already have the file and it matches, then just return the target path. if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify { @@ -153,6 +126,11 @@ func (d *DownloadClient) Get() (string, error) { log.Printf("Parsed URL: %#v", u) + /* FIXME: + handle the special case of d.config.CopyFile which returns the path + in an os-specific format. + */ + // Files when we don't copy the file are special cased. 
var f *os.File var finalPath string @@ -271,7 +249,7 @@ func (*HTTPDownloader) Cancel() { } func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { - log.Printf("Starting download: %s", src.String()) + log.Printf("Starting download over HTTP: %s", src.String()) // Seek to the beginning by default if _, err := dst.Seek(0, 0); err != nil { @@ -350,3 +328,298 @@ func (d *HTTPDownloader) Progress() uint { func (d *HTTPDownloader) Total() uint { return d.total } + +// FTPDownloader is an implementation of Downloader that downloads +// files over FTP. +type FTPDownloader struct { + userInfo url.UserInfo + mtu uint + + active bool + progress uint + total uint +} + +func (*FTPDownloader) Cancel() { + d.active = false +} + +func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { + var userinfo *url.Userinfo + + userinfo = d.userInfo + d.active = false + + // check the uri is correct + uri, err := url.Parse(src) + if err != nil { return err } + + if uri.Scheme != "ftp" { + return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + } + + // connect to ftp server + var cli *ftp.ServerConn + + log.Printf("Starting download over FTP: %s : %s\n", uri.Host, Uri.Path) + cli,err := ftp.Dial(uri.Host) + if err != nil { return nil } + defer cli.Close() + + // handle authentication + if uri.User != nil { userinfo = uri.User } + + log.Printf("Authenticating to FTP server: %s : %s\n", uri.User.username, uri.User.password) + err = cli.Login(userinfo.username, userinfo.password) + if err != nil { return err } + + // locate specified path + path := path.Dir(uri.Path) + + log.Printf("Changing to FTP directory : %s\n", path) + err = cli.ChangeDir(path) + if err != nil { return nil } + + curpath,err := cli.CurrentDir() + if err != nil { return err } + log.Printf("Current FTP directory : %s\n", curpath) + + // collect stats about the specified file + var name string + var entry *ftp.Entry + + _,name = path.Split(uri.Path) + entry = nil + + entries,err := 
cli.List(curpath) + for _,e := range entries { + if e.Type == ftp.EntryTypeFile && e.Name == name { + entry = e + break + } + } + + if entry == nil { + return fmt.Errorf("Unable to find file: %s", uri.Path) + } + log.Printf("Found file : %s : %v bytes\n", entry.Name, entry.Size) + + d.progress = 0 + d.total = entry.Size + + // download specified file + d.active = true + reader,err := cli.RetrFrom(uri.Path, d.progress) + if err != nil { return nil } + + // do it in a goro so that if someone wants to cancel it, they can + errch := make(chan error) + go func(d *FTPDownloader, r *io.Reader, w *bufio.Writer, e chan error) { + defer w.Flush() + for ; d.active { + n,err := io.CopyN(writer, reader, d.mtu) + if err != nil { break } + d.progress += n + } + d.active = false + e <- err + }(d, reader, bufio.NewWriter(dst), errch) + + // spin until it's done + err = <-errch + reader.Close() + + if err == nil && d.progress != d.total { + err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.progress, d.total) + } + + // log out and quit + cli.Logout() + cli.Quit() + return err +} + +func (d *FTPDownloader) Progress() uint { + return d.progress +} + +func (d *FTPDownloader) Total() uint { + return d.total +} + +// FileDownloader is an implementation of Downloader that downloads +// files using the regular filesystem. 
+type FileDownloader struct { + bufferSize *uint + + active bool + progress uint + total uint +} + +func (*FileDownloader) Cancel() { + d.active = false +} + +func (d *FileDownloader) Progress() uint { + return d.progress +} + +func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { + d.active = false + + /* parse the uri using the net/url module */ + uri, err := url.Parse(src) + if uri.Scheme != "file" { + return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + } + + /* use the current working directory as the base for relative uri's */ + cwd,err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Unable to get working directory") + } + + /* determine which uri format is being used and convert to a real path */ + var realpath string, basepath string + basepath = filepath.ToSlash(cwd) + + // absolute path -- file://c:/absolute/path + if strings.HasSuffix(uri.Host, ":") { + realpath = path.Join(uri.Host, uri.Path) + + // semi-absolute path (current drive letter) -- file:///absolute/path + } else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") { + realpath = path.Join(filepath.VolumeName(basepath), uri.Path) + + // relative path -- file://./relative/path + } else if uri.Host == "." { + realpath = path.Join(basepath, uri.Path) + + // relative path -- file://relative/path + } else { + realpath = path.Join(basepath, uri.Host, uri.Path) + } + + /* download the file using the operating system's facilities */ + d.progress = 0 + d.active = true + + f, err = os.Open(realpath) + if err != nil { return err } + defer f.Close() + + // get the file size + fi, err := f.Stat() + if err != nil { return err } + d.total = fi.Size() + + // no bufferSize specified, so copy synchronously. 
+ if d.bufferSize == nil { + n,err := io.Copy(dst, f) + d.active = false + d.progress += n + + // use a goro in case someone else wants to enable cancel/resume + } else { + errch := make(chan error) + go func(d* FileDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) { + defer w.Flush() + for ; d.active { + n,err := io.CopyN(writer, reader, d.bufferSize) + if err != nil { break } + d.progress += n + } + d.active = false + e <- err + }(d, f, bufio.NewWriter(dst), errch) + + // ...and we spin until it's done + err = <-errch + } + f.Close() + return err +} + +func (d *FileDownloader) Total() uint { + return d.total +} + +// SMBDownloader is an implementation of Downloader that downloads +// files using the "\\" path format on Windows +type SMBDownloader struct { + bufferSize *uint + + active bool + progress uint + total uint +} + +func (*SMBDownloader) Cancel() { + d.active = false +} + +func (d *SMBDownloader) Progress() uint { + return d.progress +} + +func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { + const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) + d.active = false + + if runtime.GOOS != "windows" { + return fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS) + } + + /* convert the uri using the net/url module to a UNC path */ + var realpath string + uri, err := url.Parse(src) + if uri.Scheme != "smb" { + return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + } + + realpath = UNCPrefix + filepath.ToSlash(path.Join(uri.Host, uri.Path)) + + /* Open up the "\\"-prefixed path using the Windows filesystem */ + d.progress = 0 + d.active = true + + f, err = os.Open(realpath) + if err != nil { return err } + defer f.Close() + + // get the file size (at the risk of performance) + fi, err := f.Stat() + if err != nil { return err } + d.total = fi.Size() + + // no bufferSize specified, so copy synchronously. 
+ if d.bufferSize == nil { + n,err := io.Copy(dst, f) + d.active = false + d.progress += n + + // use a goro in case someone else wants to enable cancel/resume + } else { + errch := make(chan error) + go func(d* SMBDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) { + defer w.Flush() + for ; d.active { + n,err := io.CopyN(writer, reader, d.bufferSize) + if err != nil { break } + d.progress += n + } + d.active = false + e <- err + }(d, f, bufio.NewWriter(dst), errch) + + // ...and as usual we spin until it's done + err = <-errch + } + f.Close() + return err +} + +func (d *SMBDownloader) Total() uint { + return d.total +} From 6170e24ecbd4a60bb94b6ff3ea11661fb65f8645 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 15:01:06 -0500 Subject: [PATCH 036/251] Refactored the code a bit to move the CopyFile hack out of DownloadClient and instead into each protocol. config.go: Removed all of the windows-specific net/url hackery since it's now handled mostly by download.go Removed the replacement of '\' with '/' since url.Parse does it now. Added knowledge of the other protocols implemented in download.go (ftp, smb) Removed some modules that were unused in this commit. download.go: Moved the file-path conversions for the different protocols into their own internally callable functions. Shuffled some of the functions around in case someone wants to implement the ability to resume. Modified DownloadClient.Get to remove the CopyFile special case and trust the protocol implementations if a user doesn't want to copy the file. Since all the protocols except for HTTPDownloader implement Cancel, added a Resume method as a placeholder for another developer to implement. Added a few missing names from their function definitions. Fixed the syntax in a few lines due to my suckage at go. Adjusted the types for progress and total so that they support 64-bit sizes. Removed the usage of the bufio library since it wasn't really being used. 
--- common/config.go | 124 +++----------- common/download.go | 391 +++++++++++++++++++++++---------------------- 2 files changed, 225 insertions(+), 290 deletions(-) diff --git a/common/config.go b/common/config.go index 7b01191a8..103c51105 100644 --- a/common/config.go +++ b/common/config.go @@ -2,10 +2,8 @@ package common import ( "fmt" - "net/url" "os" "path/filepath" - "runtime" "strings" "time" ) @@ -47,118 +45,40 @@ func ChooseString(vals ...string) string { // a completely valid URL. For example, the original URL might be "local/file.iso" // which isn't a valid URL. DownloadableURL will return "file:///local/file.iso" func DownloadableURL(original string) (string, error) { - if runtime.GOOS == "windows" { - // If the distance to the first ":" is just one character, assume - // we're dealing with a drive letter and thus a file path. - // prepend with "file:///"" now so that url.Parse won't accidentally - // parse the drive letter into the url scheme. - // See https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/ - // for more info about valid windows URIs - idx := strings.Index(original, ":") - if idx == 1 { - original = "file://" + filepath.ToSlash(original) - } - } - - // XXX: The validation here is later re-parsed in common/download.go and - // thus any modifications here must remain consistent over there too. 
- uri, err := url.Parse(original) - if err != nil { - return "", err - } - - if uri.Scheme == "" { - uri.Scheme = "file" - } - - const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) - if uri.Scheme == "file" { - var ospath string // os-formatted pathname - if runtime.GOOS == "windows" { - // Move any extra path components that were mis-parsed into the Host - // field back into the uri.Path field - if len(uri.Host) >= len(UNCPrefix) && uri.Host[:len(UNCPrefix)] == UNCPrefix { - idx := strings.Index(uri.Host[len(UNCPrefix):], string(os.PathSeparator)) - if idx > -1 { - uri.Path = filepath.ToSlash(uri.Host[idx+len(UNCPrefix):]) + uri.Path - uri.Host = uri.Host[:idx+len(UNCPrefix)] - } - } - // Now all we need to do to convert the uri to a platform-specific path - // is to trade it's slashes for some os.PathSeparator ones. - ospath = uri.Host + filepath.FromSlash(uri.Path) - - } else { - // Since we're already using sane paths on a sane platform, anything in - // uri.Host can be assumed that the user is describing a relative uri. - // This means that if we concatenate it with uri.Path, the filepath - // transform will still open the file correctly. - // i.e. file://localdirectory/filename -> localdirectory/filename - ospath = uri.Host + uri.Path - } - // Only do the filepath transformations if the file appears - // to actually exist. We don't do it on windows, because EvalSymlinks - // won't understand how to handle UNC paths and other Windows-specific minutae. - if _, err := os.Stat(ospath); err == nil && runtime.GOOS != "windows" { - ospath, err = filepath.Abs(ospath) - if err != nil { - return "", err - } - - ospath, err = filepath.EvalSymlinks(ospath) - if err != nil { - return "", err - } - - ospath = filepath.Clean(ospath) - } - - // now that ospath was normalized and such.. 
- if runtime.GOOS == "windows" { - uri.Host = "" - // Check to see if our ospath is unc-prefixed, and if it is then split - // the UNC host into uri.Host, leaving the rest in ospath. - // This way, our UNC-uri is protected from injury in the call to uri.String() - if len(ospath) >= len(UNCPrefix) && ospath[:len(UNCPrefix)] == UNCPrefix { - idx := strings.Index(ospath[len(UNCPrefix):], string(os.PathSeparator)) - if idx > -1 { - uri.Host = ospath[:len(UNCPrefix)+idx] - ospath = ospath[len(UNCPrefix)+idx:] - } - } - // Restore the uri by re-transforming our os-formatted path - uri.Path = filepath.ToSlash(ospath) - } else { - uri.Host = "" - uri.Path = filepath.ToSlash(ospath) - } - } - - // Make sure it is lowercased - uri.Scheme = strings.ToLower(uri.Scheme) // Verify that the scheme is something we support in our common downloader. - supported := []string{"file", "http", "https"} + supported := []string{"file", "http", "https", "ftp", "smb"} found := false for _, s := range supported { - if uri.Scheme == s { + if strings.HasPrefix(original, s + "://") { found = true break } } - if !found { - return "", fmt.Errorf("Unsupported URL scheme: %s", uri.Scheme) + // If it's properly prefixed with something we support, then we don't need + // to make it a uri. 
+ if found { + return original, nil } - // explicit check to see if we need to manually replace the uri host with a UNC one - if runtime.GOOS == "windows" && uri.Scheme == "file" { - if len(uri.Host) >= len(UNCPrefix) && uri.Host[:len(UNCPrefix)] == UNCPrefix { - escapedHost := url.QueryEscape(uri.Host) - return strings.Replace(uri.String(), escapedHost, uri.Host, 1), nil - } + // If the file exists, then make it an absolute path + _,err := os.Stat(original) + if err == nil { + original, err = filepath.Abs(filepath.FromSlash(original)) + if err != nil { return "", err } + + original, err = filepath.EvalSymlinks(original) + if err != nil { return "", err } + + original = filepath.Clean(original) + original = filepath.ToSlash(original) } - return uri.String(), nil + + // Since it wasn't properly prefixed, let's make it into a well-formed + // file:// uri. + + return "file://" + original, nil } // FileExistsLocally takes the URL output from DownloadableURL, and determines diff --git a/common/download.go b/common/download.go index b0550927f..c06f385e8 100644 --- a/common/download.go +++ b/common/download.go @@ -10,20 +10,20 @@ import ( "errors" "fmt" "hash" - "io" "log" "net/url" "os" "runtime" "path" - "path/filepath" "strings" ) +// imports related to each Downloader implementation import ( + "io" + "path/filepath" "net/http" - "github.com/jlaffeye/ftp" - "bufio" + "github.com/jlaffaye/ftp" ) @@ -89,23 +89,35 @@ func NewDownloadClient(c *DownloadConfig) *DownloadClient { if c.DownloaderMap == nil { c.DownloaderMap = map[string]Downloader{ "file": &FileDownloader{bufferSize: nil}, - "ftp": &FTPDownloader{userInfo: url.Userinfo{username:"anonymous", password: "anonymous@"}, mtu: mtu}, + "ftp": &FTPDownloader{userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, "http": &HTTPDownloader{userAgent: c.UserAgent}, "https": &HTTPDownloader{userAgent: c.UserAgent}, - "smb": &SMBDownloader{bufferSize: nil} + "smb": &SMBDownloader{bufferSize: nil}, } } return 
&DownloadClient{config: c} } -// A downloader is responsible for actually taking a remote URL and -// downloading it. +// A downloader implements the ability to transfer a file, and cancel or resume +// it. type Downloader interface { + Resume() Cancel() + Progress() uint64 + Total() uint64 +} + +// A LocalDownloader is responsible for converting a uri to a local path +// that the platform can open directly. +type LocalDownloader interface { + toPath(string, url.URL) (string,error) +} + +// A RemoteDownloader is responsible for actually taking a remote URL and +// downloading it. +type RemoteDownloader interface { Download(*os.File, *url.URL) error - Progress() uint - Total() uint } func (d *DownloadClient) Cancel() { @@ -119,75 +131,54 @@ func (d *DownloadClient) Get() (string, error) { return d.config.TargetPath, nil } + /* parse the configuration url into a net/url object */ u, err := url.Parse(d.config.Url) - if err != nil { - return "", err - } - + if err != nil { return "", err } log.Printf("Parsed URL: %#v", u) - /* FIXME: - handle the special case of d.config.CopyFile which returns the path - in an os-specific format. - */ + /* use the current working directory as the base for relative uri's */ + cwd,err := os.Getwd() + if err != nil { return "", err } - // Files when we don't copy the file are special cased. - var f *os.File + // Determine which is the correct downloader to use var finalPath string - sourcePath := "" - if u.Scheme == "file" && !d.config.CopyFile { - // This is special case for relative path in this case user specify - // file:../ and after parse destination goes to Opaque - if u.Path != "" { - // If url.Path is set just use this - finalPath = u.Path - } else if u.Opaque != "" { - // otherwise try url.Opaque - finalPath = u.Opaque - } - // This is a special case where we use a source file that already exists - // locally and we don't make a copy. Normally we would copy or download. 
- log.Printf("[DEBUG] Using local file: %s", finalPath) - // transform the actual file uri to a windowsy path if we're being windowsy. - if runtime.GOOS == "windows" { - // FIXME: cwd should point to a path relative to the TEMPLATE path, - // but since this isn't exposed to us anywhere, we use os.Getwd() - // and assume the user ran packer in the same directory that - // any relative files are located at. - cwd,err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Unable to get working directory") - } - finalPath = NormalizeWindowsURL(cwd, *url) - } + var ok bool + d.downloader, ok = d.config.DownloaderMap[u.Scheme] + if !ok { + return "", fmt.Errorf("No downloader for scheme: %s", u.Scheme) + } - // Keep track of the source so we can make sure not to delete this later - sourcePath = finalPath - if _, err = os.Stat(finalPath); err != nil { - return "", err - } - } else { + remote,ok := d.downloader.(RemoteDownloader) + if !ok { + return "", fmt.Errorf("Unable to treat uri scheme %s as a Downloader : %T", u.Scheme, d.downloader) + } + + local,ok := d.downloader.(LocalDownloader) + if !ok && !d.config.CopyFile{ + return "", fmt.Errorf("Not allowed to use uri scheme %s in no copy file mode : %T", u.Scheme, d.downloader) + } + + // If we're copying the file, then just use the actual downloader + if d.config.CopyFile { + var f *os.File finalPath = d.config.TargetPath - var ok bool - d.downloader, ok = d.config.DownloaderMap[u.Scheme] - if !ok { - return "", fmt.Errorf("No downloader for scheme: %s", u.Scheme) - } - - // Otherwise, download using the downloader. 
f, err = os.OpenFile(finalPath, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) - if err != nil { - return "", err - } + if err != nil { return "", err } log.Printf("[DEBUG] Downloading: %s", u.String()) - err = d.downloader.Download(f, u) + err = remote.Download(f, u) f.Close() - if err != nil { - return "", err - } + if err != nil { return "", err } + + // Otherwise if our Downloader is a LocalDownloader we can just use the + // path after transforming it. + } else { + finalPath,err = local.toPath(cwd, *u) + if err != nil { return "", err } + + log.Printf("[DEBUG] Using local file: %s", finalPath) } if d.config.Hash != nil { @@ -195,9 +186,7 @@ func (d *DownloadClient) Get() (string, error) { verify, err = d.VerifyChecksum(finalPath) if err == nil && !verify { // Only delete the file if we made a copy or downloaded it - if sourcePath != finalPath { - os.Remove(finalPath) - } + if d.config.CopyFile { os.Remove(finalPath) } err = fmt.Errorf( "checksums didn't match expected: %s", @@ -210,10 +199,7 @@ func (d *DownloadClient) Get() (string, error) { // PercentProgress returns the download progress as a percentage. func (d *DownloadClient) PercentProgress() int { - if d.downloader == nil { - return -1 - } - + if d.downloader == nil { return -1 } return int((float64(d.downloader.Progress()) / float64(d.downloader.Total())) * 100) } @@ -239,12 +225,16 @@ func (d *DownloadClient) VerifyChecksum(path string) (bool, error) { // HTTPDownloader is an implementation of Downloader that downloads // files over HTTP. 
type HTTPDownloader struct { - progress uint - total uint + progress uint64 + total uint64 userAgent string } -func (*HTTPDownloader) Cancel() { +func (d *HTTPDownloader) Cancel() { + // TODO(mitchellh): Implement +} + +func (d *HTTPDownloader) Resume() { // TODO(mitchellh): Implement } @@ -285,7 +275,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { if fi, err := dst.Stat(); err == nil { if _, err = dst.Seek(0, os.SEEK_END); err == nil { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) - d.progress = uint(fi.Size()) + d.progress = uint64(fi.Size()) } } } @@ -299,7 +289,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } - d.total = d.progress + uint(resp.ContentLength) + d.total = d.progress + uint64(resp.ContentLength) var buffer [4096]byte for { n, err := resp.Body.Read(buffer[:]) @@ -307,7 +297,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } - d.progress += uint(n) + d.progress += uint64(n) if _, werr := dst.Write(buffer[:n]); werr != nil { return werr @@ -321,29 +311,41 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return nil } -func (d *HTTPDownloader) Progress() uint { +func (d *HTTPDownloader) Progress() uint64 { return d.progress } -func (d *HTTPDownloader) Total() uint { +func (d *HTTPDownloader) Total() uint64 { return d.total } // FTPDownloader is an implementation of Downloader that downloads // files over FTP. 
type FTPDownloader struct { - userInfo url.UserInfo + userInfo *url.Userinfo mtu uint active bool - progress uint - total uint + progress uint64 + total uint64 } -func (*FTPDownloader) Cancel() { +func (d *FTPDownloader) Progress() uint64 { + return d.progress +} + +func (d *FTPDownloader) Total() uint64 { + return d.total +} + +func (d *FTPDownloader) Cancel() { d.active = false } +func (d *FTPDownloader) Resume() { + // TODO: Implement +} + func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { var userinfo *url.Userinfo @@ -351,33 +353,34 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { d.active = false // check the uri is correct - uri, err := url.Parse(src) - if err != nil { return err } - - if uri.Scheme != "ftp" { - return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + if src == nil || src.Scheme != "ftp" { + return fmt.Errorf("Unexpected uri scheme: %s", src.Scheme) } + uri := src // connect to ftp server var cli *ftp.ServerConn - log.Printf("Starting download over FTP: %s : %s\n", uri.Host, Uri.Path) + log.Printf("Starting download over FTP: %s : %s\n", uri.Host, uri.Path) cli,err := ftp.Dial(uri.Host) if err != nil { return nil } - defer cli.Close() + defer cli.Quit() // handle authentication if uri.User != nil { userinfo = uri.User } - log.Printf("Authenticating to FTP server: %s : %s\n", uri.User.username, uri.User.password) - err = cli.Login(userinfo.username, userinfo.password) + pass,ok := userinfo.Password() + if !ok { pass = "ftp@" } + + log.Printf("Authenticating to FTP server: %s : %s\n", userinfo.Username(), pass) + err = cli.Login(userinfo.Username(), pass) if err != nil { return err } // locate specified path - path := path.Dir(uri.Path) + p := path.Dir(uri.Path) - log.Printf("Changing to FTP directory : %s\n", path) - err = cli.ChangeDir(path) + log.Printf("Changing to FTP directory : %s\n", p) + err = cli.ChangeDir(p) if err != nil { return nil } curpath,err := cli.CurrentDir() @@ -393,7 +396,7 @@ 
func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { entries,err := cli.List(curpath) for _,e := range entries { - if e.Type == ftp.EntryTypeFile && e.Name == name { + if e.Type == ftp.EntryTypeFile && e.Name == name { entry = e break } @@ -414,16 +417,15 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { // do it in a goro so that if someone wants to cancel it, they can errch := make(chan error) - go func(d *FTPDownloader, r *io.Reader, w *bufio.Writer, e chan error) { - defer w.Flush() - for ; d.active { - n,err := io.CopyN(writer, reader, d.mtu) + go func(d *FTPDownloader, r io.Reader, w io.Writer, e chan error) { + for ; d.active; { + n,err := io.CopyN(w, r, int64(d.mtu)) if err != nil { break } - d.progress += n + d.progress += uint64(n) } d.active = false e <- err - }(d, reader, bufio.NewWriter(dst), errch) + }(d, reader, dst, errch) // spin until it's done err = <-errch @@ -433,106 +435,109 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.progress, d.total) } - // log out and quit + // log out cli.Logout() - cli.Quit() return err } -func (d *FTPDownloader) Progress() uint { - return d.progress -} - -func (d *FTPDownloader) Total() uint { - return d.total -} - // FileDownloader is an implementation of Downloader that downloads // files using the regular filesystem. 
type FileDownloader struct { bufferSize *uint active bool - progress uint - total uint + progress uint64 + total uint64 } -func (*FileDownloader) Cancel() { +func (d *FileDownloader) Progress() uint64 { + return d.progress +} + +func (d *FileDownloader) Total() uint64 { + return d.total +} + +func (d *FileDownloader) Cancel() { d.active = false } -func (d *FileDownloader) Progress() uint { - return d.progress +func (d *FileDownloader) Resume() { + // TODO: Implement +} + +func (d *FileDownloader) toPath(base string, uri url.URL) (string,error) { + var result string + + // absolute path -- file://c:/absolute/path -> c:/absolute/path + if strings.HasSuffix(uri.Host, ":") { + result = path.Join(uri.Host, uri.Path) + + // semi-absolute path (current drive letter) + // -- file:///absolute/path -> /absolute/path + } else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") { + result = path.Join(filepath.VolumeName(base), uri.Path) + + // relative path -- file://./relative/path -> ./relative/path + } else if uri.Host == "." 
{ + result = path.Join(base, uri.Path) + + // relative path -- file://relative/path -> ./relative/path + } else { + result = path.Join(base, uri.Host, uri.Path) + } + return filepath.ToSlash(result),nil } func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { d.active = false - /* parse the uri using the net/url module */ - uri, err := url.Parse(src) - if uri.Scheme != "file" { - return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + /* check the uri's scheme to make sure it matches */ + if src == nil || src.Scheme != "file" { + return fmt.Errorf("Unexpected uri scheme: %s", src.Scheme) } + uri := src /* use the current working directory as the base for relative uri's */ cwd,err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Unable to get working directory") - } + if err != nil { return err } /* determine which uri format is being used and convert to a real path */ - var realpath string, basepath string - basepath = filepath.ToSlash(cwd) - - // absolute path -- file://c:/absolute/path - if strings.HasSuffix(uri.Host, ":") { - realpath = path.Join(uri.Host, uri.Path) - - // semi-absolute path (current drive letter) -- file:///absolute/path - } else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") { - realpath = path.Join(filepath.VolumeName(basepath), uri.Path) - - // relative path -- file://./relative/path - } else if uri.Host == "." { - realpath = path.Join(basepath, uri.Path) - - // relative path -- file://relative/path - } else { - realpath = path.Join(basepath, uri.Host, uri.Path) - } + realpath,err := d.toPath(cwd, *uri) + if err != nil { return err } /* download the file using the operating system's facilities */ d.progress = 0 d.active = true - f, err = os.Open(realpath) + f, err := os.Open(realpath) if err != nil { return err } defer f.Close() // get the file size fi, err := f.Stat() if err != nil { return err } - d.total = fi.Size() + d.total = uint64(fi.Size()) // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { - n,err := io.Copy(dst, f) + var n int64 + n,err = io.Copy(dst, f) d.active = false - d.progress += n + d.progress += uint64(n) // use a goro in case someone else wants to enable cancel/resume } else { errch := make(chan error) - go func(d* FileDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) { - defer w.Flush() - for ; d.active { - n,err := io.CopyN(writer, reader, d.bufferSize) + go func(d* FileDownloader, r io.Reader, w io.Writer, e chan error) { + for ; d.active; { + n,err := io.CopyN(w, r, int64(*d.bufferSize)) if err != nil { break } - d.progress += n + d.progress += uint64(n) } d.active = false e <- err - }(d, f, bufio.NewWriter(dst), errch) + }(d, f, dst, errch) // ...and we spin until it's done err = <-errch @@ -541,77 +546,91 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { return err } -func (d *FileDownloader) Total() uint { - return d.total -} - // SMBDownloader is an implementation of Downloader that downloads // files using the "\\" path format on Windows type SMBDownloader struct { bufferSize *uint active bool - progress uint - total uint + progress uint64 + total uint64 } -func (*SMBDownloader) Cancel() { - d.active = false -} - -func (d *SMBDownloader) Progress() uint { +func (d *SMBDownloader) Progress() uint64 { return d.progress } -func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { - const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) +func (d *SMBDownloader) Total() uint64 { + return d.total +} + +func (d *SMBDownloader) Cancel() { d.active = false +} + +func (d *SMBDownloader) Resume() { + // TODO: Implement +} + +func (d *SMBDownloader) toPath(base string, uri url.URL) (string,error) { + const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) if runtime.GOOS != "windows" { - return fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS) + return "",fmt.Errorf("Support for SMB based uri's are not supported on %s", 
runtime.GOOS) } + return UNCPrefix + filepath.ToSlash(path.Join(uri.Host, uri.Path)), nil +} + +func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { + d.active = false + /* convert the uri using the net/url module to a UNC path */ - var realpath string - uri, err := url.Parse(src) - if uri.Scheme != "smb" { - return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme) + if src == nil || src.Scheme != "smb" { + return fmt.Errorf("Unexpected uri scheme: %s", src.Scheme) } + uri := src - realpath = UNCPrefix + filepath.ToSlash(path.Join(uri.Host, uri.Path)) + /* use the current working directory as the base for relative uri's */ + cwd,err := os.Getwd() + if err != nil { return err } + + /* convert uri to an smb-path */ + realpath,err := d.toPath(cwd, *uri) + if err != nil { return err } /* Open up the "\\"-prefixed path using the Windows filesystem */ d.progress = 0 d.active = true - f, err = os.Open(realpath) + f, err := os.Open(realpath) if err != nil { return err } defer f.Close() // get the file size (at the risk of performance) fi, err := f.Stat() if err != nil { return err } - d.total = fi.Size() + d.total = uint64(fi.Size()) // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { - n,err := io.Copy(dst, f) + var n int64 + n,err = io.Copy(dst, f) d.active = false - d.progress += n + d.progress += uint64(n) // use a goro in case someone else wants to enable cancel/resume } else { errch := make(chan error) - go func(d* SMBDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) { - defer w.Flush() - for ; d.active { - n,err := io.CopyN(writer, reader, d.bufferSize) + go func(d* SMBDownloader, r io.Reader, w io.Writer, e chan error) { + for ; d.active; { + n,err := io.CopyN(w, r, int64(*d.bufferSize)) if err != nil { break } - d.progress += n + d.progress += uint64(n) } d.active = false e <- err - }(d, f, bufio.NewWriter(dst), errch) + }(d, f, dst, errch) // ...and as usual we spin until it's done err = <-errch @@ -619,7 +638,3 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { f.Close() return err } - -func (d *SMBDownloader) Total() uint { - return d.total -} From 2f1104625df488bea7d261f079bb69a3eb41d72d Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 16:51:17 -0500 Subject: [PATCH 037/251] Fixed some of the unit-tests in common/ due to the changes made in {config,download}.go config.go: Fixed some issues related to the url scheme not being lowercased which broke some of the tests. config_test.go: Removed the UNC share test for \\host\share\file since SMB support has been moved to a different uri scheme. download_test.go: Explicitly set the CopyFile configuration option for all the unit-tests that test file copying capability. Removed the UNC share testcase since it's under a different uri scheme now. Modified the file:// UNC share testcase to explicitly test the smb:// uri. Changed the incorrect t.Errorf calls to t.Logf so that the tests can pass. 
--- common/config.go | 13 +++++++++++-- common/config_test.go | 24 ++++++++++++++++-------- common/download_test.go | 21 +++++++++++++-------- 3 files changed, 40 insertions(+), 18 deletions(-) diff --git a/common/config.go b/common/config.go index 103c51105..bcc0e08e8 100644 --- a/common/config.go +++ b/common/config.go @@ -6,6 +6,7 @@ import ( "path/filepath" "strings" "time" + "net/url" ) // PackerKeyEnv is used to specify the key interval (delay) between keystrokes @@ -50,7 +51,7 @@ func DownloadableURL(original string) (string, error) { supported := []string{"file", "http", "https", "ftp", "smb"} found := false for _, s := range supported { - if strings.HasPrefix(original, s + "://") { + if strings.HasPrefix(strings.ToLower(original), s + "://") { found = true break } @@ -59,7 +60,15 @@ func DownloadableURL(original string) (string, error) { // If it's properly prefixed with something we support, then we don't need // to make it a uri. if found { - return original, nil + original = filepath.ToSlash(original) + + // make sure that it can be parsed though.. 
+ uri,err := url.Parse(original) + if err != nil { return "", err } + + uri.Scheme = strings.ToLower(uri.Scheme) + + return uri.String(), nil } // If the file exists, then make it an absolute path diff --git a/common/config_test.go b/common/config_test.go index 365515109..3b1aff8cf 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "strings" "testing" ) @@ -38,6 +37,21 @@ func TestChooseString(t *testing.T) { } func TestDownloadableURL(t *testing.T) { + // Invalid URL: has hex code in host + _, err := DownloadableURL("http://what%20.com") + if err == nil { + t.Fatalf("expected err : %s", err) + } + + // Valid: http + u, err := DownloadableURL("HTTP://packer.io/path") + if err != nil { + t.Fatalf("err: %s", err) + } + + if u != "http://packer.io/path" { + t.Fatalf("bad: %s", u) + } cases := []struct { InputString string @@ -154,11 +168,7 @@ func TestDownloadableURL_FilePaths(t *testing.T) { } tfPath = filepath.Clean(tfPath) - filePrefix := "file://" - if runtime.GOOS == "windows" { - filePrefix += "/" - } // Relative filepath. We run this test in a func so that // the defers run right away. 
@@ -180,9 +190,7 @@ func TestDownloadableURL_FilePaths(t *testing.T) { t.Fatalf("err: %s", err) } - expected := fmt.Sprintf("%s%s", - filePrefix, - strings.Replace(tfPath, `\`, `/`, -1)) + expected := "file://" + strings.Replace(tfPath, `\`, `/`, -1) if u != expected { t.Fatalf("unexpected: %#v != %#v", u, expected) } diff --git a/common/download_test.go b/common/download_test.go index b46d81455..091d9c7d7 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -58,6 +58,7 @@ func TestDownloadClient_basic(t *testing.T) { client := NewDownloadClient(&DownloadConfig{ Url: ts.URL + "/basic.txt", TargetPath: tf.Name(), + CopyFile: true, }) path, err := client.Get() @@ -93,6 +94,7 @@ func TestDownloadClient_checksumBad(t *testing.T) { TargetPath: tf.Name(), Hash: HashForType("md5"), Checksum: checksum, + CopyFile: true, }) if _, err := client.Get(); err == nil { t.Fatal("should error") @@ -117,6 +119,7 @@ func TestDownloadClient_checksumGood(t *testing.T) { TargetPath: tf.Name(), Hash: HashForType("md5"), Checksum: checksum, + CopyFile: true, }) path, err := client.Get() if err != nil { @@ -147,6 +150,7 @@ func TestDownloadClient_checksumNoDownload(t *testing.T) { TargetPath: "./test-fixtures/root/another.txt", Hash: HashForType("md5"), Checksum: checksum, + CopyFile: true, }) path, err := client.Get() if err != nil { @@ -185,6 +189,7 @@ func TestDownloadClient_resume(t *testing.T) { client := NewDownloadClient(&DownloadConfig{ Url: ts.URL, TargetPath: tf.Name(), + CopyFile: true, }) path, err := client.Get() if err != nil { @@ -242,6 +247,7 @@ func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { config := &DownloadConfig{ Url: server.URL, TargetPath: tf.Name(), + CopyFile: true, } client := NewDownloadClient(config) @@ -273,6 +279,7 @@ func TestDownloadClient_setsUserAgent(t *testing.T) { Url: server.URL, TargetPath: tf.Name(), UserAgent: "fancy user agent", + CopyFile: true, } client := NewDownloadClient(config) @@ -353,6 +360,7 @@ func 
TestDownloadFileUrl(t *testing.T) { if err != nil { t.Fatalf("Unable to detect working directory: %s", err) } + cwd = filepath.ToSlash(cwd) // source_path is a file path and source is a network path sourcePath := fmt.Sprintf("%s/test-fixtures/fileurl/%s", cwd, "cake") @@ -455,12 +463,10 @@ func TestFileUriTransforms(t *testing.T) { // ./relative/path -> ./relative/path // /absolute/path -> /absolute/path // c:/windows/absolute -> c:/windows/absolute - // \\host/sharename/file -> \\host/sharename/file testcases := []string{ "./%s", cwd + "/%s", volume + cwd + "/%s", - "\\\\" + host + "/" + share + "/" + cwd[1:] + "/%s", } // all regular slashed testcases @@ -471,19 +477,18 @@ func TestFileUriTransforms(t *testing.T) { if err != nil { t.Errorf("Unable to transform uri '%s' into a path : %v", uri, err) } - t.Errorf("TestFileUriTransforms : Result Path '%s'", res) + t.Logf("TestFileUriTransforms : Result Path '%s'", res) } // ...and finally the oddball windows native path - // \\host\sharename\file -> \\host/sharename/file - testpath_native := filepath.FromSlash(testpath) - testcase_native := "\\\\" + host + "\\" + share + "\\" + filepath.FromSlash(cwd[1:]) + "\\%s" - uri := "file://" + fmt.Sprintf(testcase_native, testpath_native) + // smb://host/sharename/file -> \\host\sharename\file + testcase := host + "/" + share + "/" + cwd[1:] + "/%s" + uri := "smb://" + fmt.Sprintf(testcase, testpath) t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) res,err := SimulateFileUriDownload(t, uri) if err != nil { t.Errorf("Unable to transform uri '%s' into a path", uri) return } - t.Errorf("TestFileUriTransforms : Result Path '%s'", res) + t.Logf("TestFileUriTransforms : Result Path '%s'", res) } From 0fa6c3782eed3f4b24d9716903b28fc0c30323f5 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sun, 12 Feb 2017 17:00:47 -0600 Subject: [PATCH 038/251] Added a progressbar using gopkg.in/cheggaaa/pb.v1 as per #3578 for all the DownloadClients in common/download.go. 
--- common/download.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/common/download.go b/common/download.go index c06f385e8..96a40d488 100644 --- a/common/download.go +++ b/common/download.go @@ -23,6 +23,7 @@ import ( "io" "path/filepath" "net/http" + "gopkg.in/cheggaaa/pb.v1" "github.com/jlaffaye/ftp" ) @@ -157,6 +158,7 @@ func (d *DownloadClient) Get() (string, error) { local,ok := d.downloader.(LocalDownloader) if !ok && !d.config.CopyFile{ return "", fmt.Errorf("Not allowed to use uri scheme %s in no copy file mode : %T", u.Scheme, d.downloader) + d.config.CopyFile = true } // If we're copying the file, then just use the actual downloader @@ -275,7 +277,9 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { if fi, err := dst.Stat(); err == nil { if _, err = dst.Seek(0, os.SEEK_END); err == nil { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + d.progress = uint64(fi.Size()) + progressBar.Set64(int64(d.Progress())) } } } @@ -290,6 +294,8 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } d.total = d.progress + uint64(resp.ContentLength) + progressBar := pb.New64(int64(d.Total())).Start() + var buffer [4096]byte for { n, err := resp.Body.Read(buffer[:]) @@ -298,6 +304,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) if _, werr := dst.Write(buffer[:n]); werr != nil { return werr @@ -409,6 +416,7 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { d.progress = 0 d.total = entry.Size + progressBar := pb.New64(int64(d.Total())).Start() // download specified file d.active = true @@ -421,7 +429,9 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { for ; d.active; { n,err := io.CopyN(w, r, int64(d.mtu)) if err != nil { break } + d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) } d.active = false e <- err @@ -518,13 +528,16 @@ func (d 
*FileDownloader) Download(dst *os.File, src *url.URL) error { fi, err := f.Stat() if err != nil { return err } d.total = uint64(fi.Size()) + progressBar := pb.New64(int64(d.Total())).Start() // no bufferSize specified, so copy synchronously. if d.bufferSize == nil { var n int64 n,err = io.Copy(dst, f) d.active = false + d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) // use a goro in case someone else wants to enable cancel/resume } else { @@ -533,7 +546,9 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { for ; d.active; { n,err := io.CopyN(w, r, int64(*d.bufferSize)) if err != nil { break } + d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) } d.active = false e <- err @@ -611,13 +626,16 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { fi, err := f.Stat() if err != nil { return err } d.total = uint64(fi.Size()) + progressBar := pb.New64(int64(d.Total())).Start() // no bufferSize specified, so copy synchronously. if d.bufferSize == nil { var n int64 n,err = io.Copy(dst, f) d.active = false + d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) // use a goro in case someone else wants to enable cancel/resume } else { @@ -626,7 +644,9 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { for ; d.active; { n,err := io.CopyN(w, r, int64(*d.bufferSize)) if err != nil { break } + d.progress += uint64(n) + progressBar.Set64(int64(d.Progress())) } d.active = false e <- err From 11ff4439a691671c36421bb46114386735ce735e Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 1 Mar 2017 14:35:00 -0600 Subject: [PATCH 039/251] Moved the setting of HTTPDownloader's current progress to after the object actually gets instantiated. 
;) --- common/download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/download.go b/common/download.go index 96a40d488..f8b273e8d 100644 --- a/common/download.go +++ b/common/download.go @@ -279,7 +279,6 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) d.progress = uint64(fi.Size()) - progressBar.Set64(int64(d.Progress())) } } } @@ -295,6 +294,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { d.total = d.progress + uint64(resp.ContentLength) progressBar := pb.New64(int64(d.Total())).Start() + progressBar.Set64(int64(d.Progress())) var buffer [4096]byte for { From b1ff14714b093bde3b14fc824fad27edcf6de751 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 1 Mar 2017 14:37:05 -0600 Subject: [PATCH 040/251] go fmt --- common/config.go | 22 +++-- common/download.go | 205 +++++++++++++++++++++++++--------------- common/download_test.go | 14 +-- 3 files changed, 148 insertions(+), 93 deletions(-) diff --git a/common/config.go b/common/config.go index bcc0e08e8..0e97f6a0e 100644 --- a/common/config.go +++ b/common/config.go @@ -2,11 +2,11 @@ package common import ( "fmt" + "net/url" "os" "path/filepath" "strings" "time" - "net/url" ) // PackerKeyEnv is used to specify the key interval (delay) between keystrokes @@ -51,7 +51,7 @@ func DownloadableURL(original string) (string, error) { supported := []string{"file", "http", "https", "ftp", "smb"} found := false for _, s := range supported { - if strings.HasPrefix(strings.ToLower(original), s + "://") { + if strings.HasPrefix(strings.ToLower(original), s+"://") { found = true break } @@ -63,8 +63,10 @@ func DownloadableURL(original string) (string, error) { original = filepath.ToSlash(original) // make sure that it can be parsed though.. 
- uri,err := url.Parse(original) - if err != nil { return "", err } + uri, err := url.Parse(original) + if err != nil { + return "", err + } uri.Scheme = strings.ToLower(uri.Scheme) @@ -72,16 +74,20 @@ func DownloadableURL(original string) (string, error) { } // If the file exists, then make it an absolute path - _,err := os.Stat(original) + _, err := os.Stat(original) if err == nil { original, err = filepath.Abs(filepath.FromSlash(original)) - if err != nil { return "", err } + if err != nil { + return "", err + } original, err = filepath.EvalSymlinks(original) - if err != nil { return "", err } + if err != nil { + return "", err + } original = filepath.Clean(original) - original = filepath.ToSlash(original) + original = filepath.ToSlash(original) } // Since it wasn't properly prefixed, let's make it into a well-formed diff --git a/common/download.go b/common/download.go index f8b273e8d..110cb1536 100644 --- a/common/download.go +++ b/common/download.go @@ -13,21 +13,20 @@ import ( "log" "net/url" "os" - "runtime" "path" + "runtime" "strings" ) // imports related to each Downloader implementation import ( - "io" - "path/filepath" - "net/http" - "gopkg.in/cheggaaa/pb.v1" "github.com/jlaffaye/ftp" + "gopkg.in/cheggaaa/pb.v1" + "io" + "net/http" + "path/filepath" ) - // DownloadConfig is the configuration given to instantiate a new // download instance. Once a configuration is used to instantiate // a download client, it must not be modified. @@ -112,7 +111,7 @@ type Downloader interface { // A LocalDownloader is responsible for converting a uri to a local path // that the platform can open directly. 
type LocalDownloader interface { - toPath(string, url.URL) (string,error) + toPath(string, url.URL) (string, error) } // A RemoteDownloader is responsible for actually taking a remote URL and @@ -134,12 +133,16 @@ func (d *DownloadClient) Get() (string, error) { /* parse the configuration url into a net/url object */ u, err := url.Parse(d.config.Url) - if err != nil { return "", err } + if err != nil { + return "", err + } log.Printf("Parsed URL: %#v", u) /* use the current working directory as the base for relative uri's */ - cwd,err := os.Getwd() - if err != nil { return "", err } + cwd, err := os.Getwd() + if err != nil { + return "", err + } // Determine which is the correct downloader to use var finalPath string @@ -150,13 +153,13 @@ func (d *DownloadClient) Get() (string, error) { return "", fmt.Errorf("No downloader for scheme: %s", u.Scheme) } - remote,ok := d.downloader.(RemoteDownloader) + remote, ok := d.downloader.(RemoteDownloader) if !ok { return "", fmt.Errorf("Unable to treat uri scheme %s as a Downloader : %T", u.Scheme, d.downloader) } - local,ok := d.downloader.(LocalDownloader) - if !ok && !d.config.CopyFile{ + local, ok := d.downloader.(LocalDownloader) + if !ok && !d.config.CopyFile { return "", fmt.Errorf("Not allowed to use uri scheme %s in no copy file mode : %T", u.Scheme, d.downloader) d.config.CopyFile = true } @@ -167,18 +170,24 @@ func (d *DownloadClient) Get() (string, error) { finalPath = d.config.TargetPath f, err = os.OpenFile(finalPath, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) - if err != nil { return "", err } + if err != nil { + return "", err + } log.Printf("[DEBUG] Downloading: %s", u.String()) err = remote.Download(f, u) f.Close() - if err != nil { return "", err } + if err != nil { + return "", err + } - // Otherwise if our Downloader is a LocalDownloader we can just use the - // path after transforming it. + // Otherwise if our Downloader is a LocalDownloader we can just use the + // path after transforming it. 
} else { - finalPath,err = local.toPath(cwd, *u) - if err != nil { return "", err } + finalPath, err = local.toPath(cwd, *u) + if err != nil { + return "", err + } log.Printf("[DEBUG] Using local file: %s", finalPath) } @@ -188,7 +197,9 @@ func (d *DownloadClient) Get() (string, error) { verify, err = d.VerifyChecksum(finalPath) if err == nil && !verify { // Only delete the file if we made a copy or downloaded it - if d.config.CopyFile { os.Remove(finalPath) } + if d.config.CopyFile { + os.Remove(finalPath) + } err = fmt.Errorf( "checksums didn't match expected: %s", @@ -201,7 +212,9 @@ func (d *DownloadClient) Get() (string, error) { // PercentProgress returns the download progress as a percentage. func (d *DownloadClient) PercentProgress() int { - if d.downloader == nil { return -1 } + if d.downloader == nil { + return -1 + } return int((float64(d.downloader.Progress()) / float64(d.downloader.Total())) * 100) } @@ -330,11 +343,11 @@ func (d *HTTPDownloader) Total() uint64 { // files over FTP. 
type FTPDownloader struct { userInfo *url.Userinfo - mtu uint + mtu uint - active bool + active bool progress uint64 - total uint64 + total uint64 } func (d *FTPDownloader) Progress() uint64 { @@ -369,40 +382,52 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { var cli *ftp.ServerConn log.Printf("Starting download over FTP: %s : %s\n", uri.Host, uri.Path) - cli,err := ftp.Dial(uri.Host) - if err != nil { return nil } + cli, err := ftp.Dial(uri.Host) + if err != nil { + return nil + } defer cli.Quit() // handle authentication - if uri.User != nil { userinfo = uri.User } + if uri.User != nil { + userinfo = uri.User + } - pass,ok := userinfo.Password() - if !ok { pass = "ftp@" } + pass, ok := userinfo.Password() + if !ok { + pass = "ftp@" + } log.Printf("Authenticating to FTP server: %s : %s\n", userinfo.Username(), pass) err = cli.Login(userinfo.Username(), pass) - if err != nil { return err } + if err != nil { + return err + } // locate specified path p := path.Dir(uri.Path) log.Printf("Changing to FTP directory : %s\n", p) err = cli.ChangeDir(p) - if err != nil { return nil } + if err != nil { + return nil + } - curpath,err := cli.CurrentDir() - if err != nil { return err } + curpath, err := cli.CurrentDir() + if err != nil { + return err + } log.Printf("Current FTP directory : %s\n", curpath) // collect stats about the specified file var name string var entry *ftp.Entry - _,name = path.Split(uri.Path) + _, name = path.Split(uri.Path) entry = nil - entries,err := cli.List(curpath) - for _,e := range entries { + entries, err := cli.List(curpath) + for _, e := range entries { if e.Type == ftp.EntryTypeFile && e.Name == name { entry = e break @@ -420,15 +445,19 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { // download specified file d.active = true - reader,err := cli.RetrFrom(uri.Path, d.progress) - if err != nil { return nil } + reader, err := cli.RetrFrom(uri.Path, d.progress) + if err != nil { + return nil + } // do 
it in a goro so that if someone wants to cancel it, they can errch := make(chan error) go func(d *FTPDownloader, r io.Reader, w io.Writer, e chan error) { - for ; d.active; { - n,err := io.CopyN(w, r, int64(d.mtu)) - if err != nil { break } + for d.active { + n, err := io.CopyN(w, r, int64(d.mtu)) + if err != nil { + break + } d.progress += uint64(n) progressBar.Set64(int64(d.Progress())) @@ -455,9 +484,9 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { type FileDownloader struct { bufferSize *uint - active bool + active bool progress uint64 - total uint64 + total uint64 } func (d *FileDownloader) Progress() uint64 { @@ -476,27 +505,27 @@ func (d *FileDownloader) Resume() { // TODO: Implement } -func (d *FileDownloader) toPath(base string, uri url.URL) (string,error) { +func (d *FileDownloader) toPath(base string, uri url.URL) (string, error) { var result string // absolute path -- file://c:/absolute/path -> c:/absolute/path if strings.HasSuffix(uri.Host, ":") { result = path.Join(uri.Host, uri.Path) - // semi-absolute path (current drive letter) - // -- file:///absolute/path -> /absolute/path + // semi-absolute path (current drive letter) + // -- file:///absolute/path -> /absolute/path } else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") { result = path.Join(filepath.VolumeName(base), uri.Path) - // relative path -- file://./relative/path -> ./relative/path + // relative path -- file://./relative/path -> ./relative/path } else if uri.Host == "." 
{ result = path.Join(base, uri.Path) - // relative path -- file://relative/path -> ./relative/path + // relative path -- file://relative/path -> ./relative/path } else { result = path.Join(base, uri.Host, uri.Path) } - return filepath.ToSlash(result),nil + return filepath.ToSlash(result), nil } func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { @@ -509,43 +538,53 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { uri := src /* use the current working directory as the base for relative uri's */ - cwd,err := os.Getwd() - if err != nil { return err } + cwd, err := os.Getwd() + if err != nil { + return err + } /* determine which uri format is being used and convert to a real path */ - realpath,err := d.toPath(cwd, *uri) - if err != nil { return err } + realpath, err := d.toPath(cwd, *uri) + if err != nil { + return err + } /* download the file using the operating system's facilities */ d.progress = 0 d.active = true f, err := os.Open(realpath) - if err != nil { return err } + if err != nil { + return err + } defer f.Close() // get the file size fi, err := f.Stat() - if err != nil { return err } + if err != nil { + return err + } d.total = uint64(fi.Size()) progressBar := pb.New64(int64(d.Total())).Start() // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { var n int64 - n,err = io.Copy(dst, f) + n, err = io.Copy(dst, f) d.active = false d.progress += uint64(n) progressBar.Set64(int64(d.Progress())) - // use a goro in case someone else wants to enable cancel/resume + // use a goro in case someone else wants to enable cancel/resume } else { errch := make(chan error) - go func(d* FileDownloader, r io.Reader, w io.Writer, e chan error) { - for ; d.active; { - n,err := io.CopyN(w, r, int64(*d.bufferSize)) - if err != nil { break } + go func(d *FileDownloader, r io.Reader, w io.Writer, e chan error) { + for d.active { + n, err := io.CopyN(w, r, int64(*d.bufferSize)) + if err != nil { + break + } d.progress += uint64(n) progressBar.Set64(int64(d.Progress())) @@ -566,9 +605,9 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { type SMBDownloader struct { bufferSize *uint - active bool + active bool progress uint64 - total uint64 + total uint64 } func (d *SMBDownloader) Progress() uint64 { @@ -587,11 +626,11 @@ func (d *SMBDownloader) Resume() { // TODO: Implement } -func (d *SMBDownloader) toPath(base string, uri url.URL) (string,error) { - const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator) +func (d *SMBDownloader) toPath(base string, uri url.URL) (string, error) { + const UNCPrefix = string(os.PathSeparator) + string(os.PathSeparator) if runtime.GOOS != "windows" { - return "",fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS) + return "", fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS) } return UNCPrefix + filepath.ToSlash(path.Join(uri.Host, uri.Path)), nil @@ -607,43 +646,53 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { uri := src /* use the current working directory as the base for relative uri's */ - cwd,err := os.Getwd() - if err != nil { return err } + cwd, err := os.Getwd() + if err != nil { + return err + } /* convert uri to an smb-path */ - realpath,err := 
d.toPath(cwd, *uri) - if err != nil { return err } + realpath, err := d.toPath(cwd, *uri) + if err != nil { + return err + } /* Open up the "\\"-prefixed path using the Windows filesystem */ d.progress = 0 d.active = true f, err := os.Open(realpath) - if err != nil { return err } + if err != nil { + return err + } defer f.Close() // get the file size (at the risk of performance) fi, err := f.Stat() - if err != nil { return err } + if err != nil { + return err + } d.total = uint64(fi.Size()) progressBar := pb.New64(int64(d.Total())).Start() // no bufferSize specified, so copy synchronously. if d.bufferSize == nil { var n int64 - n,err = io.Copy(dst, f) + n, err = io.Copy(dst, f) d.active = false d.progress += uint64(n) progressBar.Set64(int64(d.Progress())) - // use a goro in case someone else wants to enable cancel/resume + // use a goro in case someone else wants to enable cancel/resume } else { errch := make(chan error) - go func(d* SMBDownloader, r io.Reader, w io.Writer, e chan error) { - for ; d.active; { - n,err := io.CopyN(w, r, int64(*d.bufferSize)) - if err != nil { break } + go func(d *SMBDownloader, r io.Reader, w io.Writer, e chan error) { + for d.active { + n, err := io.CopyN(w, r, int64(*d.bufferSize)) + if err != nil { + break + } d.progress += uint64(n) progressBar.Set64(int64(d.Progress())) diff --git a/common/download_test.go b/common/download_test.go index 091d9c7d7..81cc3244f 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -8,9 +8,9 @@ import ( "net/http" "net/http/httptest" "os" + "path/filepath" "runtime" "strings" - "path/filepath" "testing" ) @@ -119,7 +119,7 @@ func TestDownloadClient_checksumGood(t *testing.T) { TargetPath: tf.Name(), Hash: HashForType("md5"), Checksum: checksum, - CopyFile: true, + CopyFile: true, }) path, err := client.Get() if err != nil { @@ -398,7 +398,7 @@ func TestDownloadFileUrl(t *testing.T) { // into a testable file path whilst ignoring a correct checksum match, stripping // UNC path info, 
and then calling stat to ensure the correct file exists. // (used by TestFileUriTransforms) -func SimulateFileUriDownload(t *testing.T, uri string) (string,error) { +func SimulateFileUriDownload(t *testing.T, uri string) (string, error) { // source_path is a file path and source is a network path source := fmt.Sprintf(uri) t.Logf("Trying to download %s", source) @@ -430,7 +430,7 @@ func SimulateFileUriDownload(t *testing.T, uri string) (string,error) { if _, err = os.Stat(path); err != nil { t.Errorf("Could not stat source file: %s", path) } - return path,err + return path, err } // TestFileUriTransforms tests the case where we use a local file uri @@ -470,10 +470,10 @@ func TestFileUriTransforms(t *testing.T) { } // all regular slashed testcases - for _,testcase := range testcases { + for _, testcase := range testcases { uri := "file://" + fmt.Sprintf(testcase, testpath) t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) - res,err := SimulateFileUriDownload(t, uri) + res, err := SimulateFileUriDownload(t, uri) if err != nil { t.Errorf("Unable to transform uri '%s' into a path : %v", uri, err) } @@ -485,7 +485,7 @@ func TestFileUriTransforms(t *testing.T) { testcase := host + "/" + share + "/" + cwd[1:] + "/%s" uri := "smb://" + fmt.Sprintf(testcase, testpath) t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) - res,err := SimulateFileUriDownload(t, uri) + res, err := SimulateFileUriDownload(t, uri) if err != nil { t.Errorf("Unable to transform uri '%s' into a path", uri) return From c29e3915be5a95643a00a103e7d97d607834bfcd Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 1 Mar 2017 14:38:01 -0600 Subject: [PATCH 041/251] Added gopkg.in/cheggaaa/pb.v1 to vendor/vendor.json via govendor. 
--- vendor/gopkg.in/cheggaaa/pb.v1/LICENSE | 12 + vendor/gopkg.in/cheggaaa/pb.v1/README.md | 176 +++++++ vendor/gopkg.in/cheggaaa/pb.v1/format.go | 87 ++++ vendor/gopkg.in/cheggaaa/pb.v1/pb.go | 444 ++++++++++++++++++ vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go | 8 + vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go | 6 + vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go | 141 ++++++ vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go | 110 +++++ vendor/gopkg.in/cheggaaa/pb.v1/pool.go | 76 +++ vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go | 35 ++ vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go | 22 + vendor/gopkg.in/cheggaaa/pb.v1/reader.go | 25 + vendor/gopkg.in/cheggaaa/pb.v1/runecount.go | 17 + vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go | 9 + vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go | 7 + vendor/vendor.json | 16 + 16 files changed, 1191 insertions(+) create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/LICENSE create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/README.md create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/format.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pool.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/reader.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/runecount.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE b/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE new file mode 100644 index 000000000..511970333 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012-2015, Sergey 
Cherepanov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/gopkg.in/cheggaaa/pb.v1/README.md new file mode 100644 index 000000000..31babb305 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/README.md @@ -0,0 +1,176 @@ +# Terminal progress bar for Go + +Simple progress bar for console programs. 
+ + +## Installation + +``` +go get gopkg.in/cheggaaa/pb.v1 +``` + +## Usage + +```Go +package main + +import ( + "gopkg.in/cheggaaa/pb.v1" + "time" +) + +func main() { + count := 100000 + bar := pb.StartNew(count) + for i := 0; i < count; i++ { + bar.Increment() + time.Sleep(time.Millisecond) + } + bar.FinishPrint("The End!") +} + +``` + +Result will be like this: + +``` +> go run test.go +37158 / 100000 [================>_______________________________] 37.16% 1m11s +``` + +## Customization + +```Go +// create bar +bar := pb.New(count) + +// refresh info every second (default 200ms) +bar.SetRefreshRate(time.Second) + +// show percents (by default already true) +bar.ShowPercent = true + +// show bar (by default already true) +bar.ShowBar = true + +// no counters +bar.ShowCounters = false + +// show "time left" +bar.ShowTimeLeft = true + +// show average speed +bar.ShowSpeed = true + +// sets the width of the progress bar +bar.SetWidth(80) + +// sets the width of the progress bar, but if terminal size smaller will be ignored +bar.SetMaxWidth(80) + +// convert output to readable format (like KB, MB) +bar.SetUnits(pb.U_BYTES) + +// and start +bar.Start() +``` + +## Progress bar for IO Operations + +```go +// create and start bar +bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) +bar.Start() + +// my io.Reader +r := myReader + +// my io.Writer +w := myWriter + +// create proxy reader +reader := bar.NewProxyReader(r) + +// and copy from pb reader +io.Copy(w, reader) + +``` + +```go +// create and start bar +bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) +bar.Start() + +// my io.Reader +r := myReader + +// my io.Writer +w := myWriter + +// create multi writer +writer := io.MultiWriter(w, bar) + +// and copy +io.Copy(writer, r) + +bar.Finish() +``` + +## Custom Progress Bar Look-and-feel + +```go +bar.Format("<.- >") +``` + +## Multiple Progress Bars (experimental and unstable) + +Do not print to terminal while pool is active. 
+ +```go +package main + +import ( + "math/rand" + "sync" + "time" + + "gopkg.in/cheggaaa/pb.v1" +) + +func main() { + // create bars + first := pb.New(200).Prefix("First ") + second := pb.New(200).Prefix("Second ") + third := pb.New(200).Prefix("Third ") + // start pool + pool, err := pb.StartPool(first, second, third) + if err != nil { + panic(err) + } + // update bars + wg := new(sync.WaitGroup) + for _, bar := range []*pb.ProgressBar{first, second, third} { + wg.Add(1) + go func(cb *pb.ProgressBar) { + for n := 0; n < 200; n++ { + cb.Increment() + time.Sleep(time.Millisecond * time.Duration(rand.Intn(100))) + } + cb.Finish() + wg.Done() + }(bar) + } + wg.Wait() + // close pool + pool.Stop() +} +``` + +The result will be as follows: + +``` +$ go run example/multiple.go +First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s +Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s +Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s +``` diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go new file mode 100644 index 000000000..d5aeff793 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/format.go @@ -0,0 +1,87 @@ +package pb + +import ( + "fmt" + "strings" + "time" +) + +type Units int + +const ( + // U_NO are default units, they represent a simple value and are not formatted at all. + U_NO Units = iota + // U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...) 
+ U_BYTES + // U_DURATION units are formatted in a human readable way (3h14m15s) + U_DURATION +) + +func Format(i int64) *formatter { + return &formatter{n: i} +} + +type formatter struct { + n int64 + unit Units + width int + perSec bool +} + +func (f *formatter) Value(n int64) *formatter { + f.n = n + return f +} + +func (f *formatter) To(unit Units) *formatter { + f.unit = unit + return f +} + +func (f *formatter) Width(width int) *formatter { + f.width = width + return f +} + +func (f *formatter) PerSec() *formatter { + f.perSec = true + return f +} + +func (f *formatter) String() (out string) { + switch f.unit { + case U_BYTES: + out = formatBytes(f.n) + case U_DURATION: + d := time.Duration(f.n) + if d > time.Hour*24 { + out = fmt.Sprintf("%dd", d/24/time.Hour) + d -= (d / time.Hour / 24) * (time.Hour * 24) + } + out = fmt.Sprintf("%s%v", out, d) + default: + out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) + } + if f.perSec { + out += "/s" + } + return +} + +// Convert bytes to human readable string. 
Like a 2 MB, 64.2 KB, 52 B +func formatBytes(i int64) (result string) { + switch { + case i > (1024 * 1024 * 1024 * 1024): + result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024) + case i > (1024 * 1024 * 1024): + result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024) + case i > (1024 * 1024): + result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024) + case i > 1024: + result = fmt.Sprintf("%.02f KB", float64(i)/1024) + default: + result = fmt.Sprintf("%d B", i) + } + result = strings.Trim(result, " ") + return +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go new file mode 100644 index 000000000..f1f15bff3 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go @@ -0,0 +1,444 @@ +// Simple console progress bars +package pb + +import ( + "fmt" + "io" + "math" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" +) + +// Current version +const Version = "1.0.6" + +const ( + // Default refresh rate - 200ms + DEFAULT_REFRESH_RATE = time.Millisecond * 200 + FORMAT = "[=>-]" +) + +// DEPRECATED +// variables for backward compatibility, from now do not work +// use pb.Format and pb.SetRefreshRate +var ( + DefaultRefreshRate = DEFAULT_REFRESH_RATE + BarStart, BarEnd, Empty, Current, CurrentN string +) + +// Create new progress bar object +func New(total int) *ProgressBar { + return New64(int64(total)) +} + +// Create new progress bar object using int64 as total +func New64(total int64) *ProgressBar { + pb := &ProgressBar{ + Total: total, + RefreshRate: DEFAULT_REFRESH_RATE, + ShowPercent: true, + ShowCounters: true, + ShowBar: true, + ShowTimeLeft: true, + ShowFinalTime: true, + Units: U_NO, + ManualUpdate: false, + finish: make(chan struct{}), + currentValue: -1, + mu: new(sync.Mutex), + } + return pb.Format(FORMAT) +} + +// Create new object and start +func StartNew(total int) *ProgressBar { + return New(total).Start() +} + +// Callback for custom output +// For example: +// bar.Callback = func(s 
string) { +// mySuperPrint(s) +// } +// +type Callback func(out string) + +type ProgressBar struct { + current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + + Total int64 + RefreshRate time.Duration + ShowPercent, ShowCounters bool + ShowSpeed, ShowTimeLeft, ShowBar bool + ShowFinalTime bool + Output io.Writer + Callback Callback + NotPrint bool + Units Units + Width int + ForceWidth bool + ManualUpdate bool + AutoStat bool + + // Default width for the time box. + UnitsWidth int + TimeBoxWidth int + + finishOnce sync.Once //Guards isFinish + finish chan struct{} + isFinish bool + + startTime time.Time + startValue int64 + currentValue int64 + + prefix, postfix string + + mu *sync.Mutex + lastPrint string + + BarStart string + BarEnd string + Empty string + Current string + CurrentN string + + AlwaysUpdate bool +} + +// Start print +func (pb *ProgressBar) Start() *ProgressBar { + pb.startTime = time.Now() + pb.startValue = pb.current + if pb.Total == 0 { + pb.ShowTimeLeft = false + pb.ShowPercent = false + pb.AutoStat = false + } + if !pb.ManualUpdate { + pb.Update() // Initial printing of the bar before running the bar refresher. 
+ go pb.refresher() + } + return pb +} + +// Increment current value +func (pb *ProgressBar) Increment() int { + return pb.Add(1) +} + +// Get current value +func (pb *ProgressBar) Get() int64 { + c := atomic.LoadInt64(&pb.current) + return c +} + +// Set current value +func (pb *ProgressBar) Set(current int) *ProgressBar { + return pb.Set64(int64(current)) +} + +// Set64 sets the current value as int64 +func (pb *ProgressBar) Set64(current int64) *ProgressBar { + atomic.StoreInt64(&pb.current, current) + return pb +} + +// Add to current value +func (pb *ProgressBar) Add(add int) int { + return int(pb.Add64(int64(add))) +} + +func (pb *ProgressBar) Add64(add int64) int64 { + return atomic.AddInt64(&pb.current, add) +} + +// Set prefix string +func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { + pb.prefix = prefix + return pb +} + +// Set postfix string +func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { + pb.postfix = postfix + return pb +} + +// Set custom format for bar +// Example: bar.Format("[=>_]") +// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter +func (pb *ProgressBar) Format(format string) *ProgressBar { + var formatEntries []string + if len(format) == 5 { + formatEntries = strings.Split(format, "") + } else { + formatEntries = strings.Split(format, "\x00") + } + if len(formatEntries) == 5 { + pb.BarStart = formatEntries[0] + pb.BarEnd = formatEntries[4] + pb.Empty = formatEntries[3] + pb.Current = formatEntries[1] + pb.CurrentN = formatEntries[2] + } + return pb +} + +// Set bar refresh rate +func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { + pb.RefreshRate = rate + return pb +} + +// Set units +// bar.SetUnits(U_NO) - by default +// bar.SetUnits(U_BYTES) - for Mb, Kb, etc +func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { + pb.Units = units + return pb +} + +// Set max width, if width is bigger than terminal width, will be ignored +func (pb *ProgressBar) SetMaxWidth(width int) 
*ProgressBar { + pb.Width = width + pb.ForceWidth = false + return pb +} + +// Set bar width +func (pb *ProgressBar) SetWidth(width int) *ProgressBar { + pb.Width = width + pb.ForceWidth = true + return pb +} + +// End print +func (pb *ProgressBar) Finish() { + //Protect multiple calls + pb.finishOnce.Do(func() { + close(pb.finish) + pb.write(atomic.LoadInt64(&pb.current)) + switch { + case pb.Output != nil: + fmt.Fprintln(pb.Output) + case !pb.NotPrint: + fmt.Println() + } + pb.isFinish = true + }) +} + +// End print and write string 'str' +func (pb *ProgressBar) FinishPrint(str string) { + pb.Finish() + if pb.Output != nil { + fmt.Fprintln(pb.Output, str) + } else { + fmt.Println(str) + } +} + +// implement io.Writer +func (pb *ProgressBar) Write(p []byte) (n int, err error) { + n = len(p) + pb.Add(n) + return +} + +// implement io.Reader +func (pb *ProgressBar) Read(p []byte) (n int, err error) { + n = len(p) + pb.Add(n) + return +} + +// Create new proxy reader over bar +// Takes io.Reader or io.ReadCloser +func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { + return &Reader{r, pb} +} + +func (pb *ProgressBar) write(current int64) { + width := pb.GetWidth() + + var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string + + // percents + if pb.ShowPercent { + var percent float64 + if pb.Total > 0 { + percent = float64(current) / (float64(pb.Total) / float64(100)) + } else { + percent = float64(current) / float64(100) + } + percentBox = fmt.Sprintf(" %6.02f%%", percent) + } + + // counters + if pb.ShowCounters { + current := Format(current).To(pb.Units).Width(pb.UnitsWidth) + if pb.Total > 0 { + total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) + countersBox = fmt.Sprintf(" %s / %s ", current, total) + } else { + countersBox = fmt.Sprintf(" %s / ? 
", current) + } + } + + // time left + fromStart := time.Now().Sub(pb.startTime) + currentFromStart := current - pb.startValue + select { + case <-pb.finish: + if pb.ShowFinalTime { + var left time.Duration + left = (fromStart / time.Second) * time.Second + timeLeftBox = fmt.Sprintf(" %s", left.String()) + } + default: + if pb.ShowTimeLeft && currentFromStart > 0 { + perEntry := fromStart / time.Duration(currentFromStart) + var left time.Duration + if pb.Total > 0 { + left = time.Duration(pb.Total-currentFromStart) * perEntry + left = (left / time.Second) * time.Second + } else { + left = time.Duration(currentFromStart) * perEntry + left = (left / time.Second) * time.Second + } + timeLeft := Format(int64(left)).To(U_DURATION).String() + timeLeftBox = fmt.Sprintf(" %s", timeLeft) + } + } + + if len(timeLeftBox) < pb.TimeBoxWidth { + timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox) + } + + // speed + if pb.ShowSpeed && currentFromStart > 0 { + fromStart := time.Now().Sub(pb.startTime) + speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second)) + speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() + } + + barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) + // bar + if pb.ShowBar { + size := width - barWidth + if size > 0 { + if pb.Total > 0 { + curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) + emptCount := size - curCount + barBox = pb.BarStart + if emptCount < 0 { + emptCount = 0 + } + if curCount > size { + curCount = size + } + if emptCount <= 0 { + barBox += strings.Repeat(pb.Current, curCount) + } else if curCount > 0 { + barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN + } + barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd + } else { + barBox = pb.BarStart + pos := size - int(current)%int(size) + if 
pos-1 > 0 { + barBox += strings.Repeat(pb.Empty, pos-1) + } + barBox += pb.Current + if size-pos-1 > 0 { + barBox += strings.Repeat(pb.Empty, size-pos-1) + } + barBox += pb.BarEnd + } + } + } + + // check len + out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix + if escapeAwareRuneCountInString(out) < width { + end = strings.Repeat(" ", width-utf8.RuneCountInString(out)) + } + + // and print! + pb.mu.Lock() + pb.lastPrint = out + end + pb.mu.Unlock() + switch { + case pb.isFinish: + return + case pb.Output != nil: + fmt.Fprint(pb.Output, "\r"+out+end) + case pb.Callback != nil: + pb.Callback(out + end) + case !pb.NotPrint: + fmt.Print("\r" + out + end) + } +} + +// GetTerminalWidth - returns terminal width for all platforms. +func GetTerminalWidth() (int, error) { + return terminalWidth() +} + +func (pb *ProgressBar) GetWidth() int { + if pb.ForceWidth { + return pb.Width + } + + width := pb.Width + termWidth, _ := terminalWidth() + if width == 0 || termWidth <= width { + width = termWidth + } + + return width +} + +// Write the current state of the progressbar +func (pb *ProgressBar) Update() { + c := atomic.LoadInt64(&pb.current) + if pb.AlwaysUpdate || c != pb.currentValue { + pb.write(c) + pb.currentValue = c + } + if pb.AutoStat { + if c == 0 { + pb.startTime = time.Now() + pb.startValue = 0 + } else if c >= pb.Total && pb.isFinish != true { + pb.Finish() + } + } +} + +func (pb *ProgressBar) String() string { + return pb.lastPrint +} + +// Internal loop for refreshing the progressbar +func (pb *ProgressBar) refresher() { + for { + select { + case <-pb.finish: + return + case <-time.After(pb.RefreshRate): + pb.Update() + } + } +} + +type window struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go new file mode 100644 index 000000000..c06097b43 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go @@ -0,0 
+1,8 @@ +// +build linux darwin freebsd netbsd openbsd dragonfly +// +build !appengine + +package pb + +import "syscall" + +const sysIoctl = syscall.SYS_IOCTL diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go new file mode 100644 index 000000000..b7d461e17 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go @@ -0,0 +1,6 @@ +// +build solaris +// +build !appengine + +package pb + +const sysIoctl = 54 diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go new file mode 100644 index 000000000..72f682835 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go @@ -0,0 +1,141 @@ +// +build windows + +package pb + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + "unsafe" +) + +var tty = os.Stdin + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + + // GetConsoleScreenBufferInfo retrieves information about the + // specified console screen buffer. + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + + // GetConsoleMode retrieves the current input mode of a console's + // input buffer or the current output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + getConsoleMode = kernel32.NewProc("GetConsoleMode") + + // SetConsoleMode sets the input mode of a console's input buffer + // or the output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + setConsoleMode = kernel32.NewProc("SetConsoleMode") + + // SetConsoleCursorPosition sets the cursor position in the + // specified console screen buffer. 
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx + setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") +) + +type ( + // Defines the coordinates of the upper left and lower right corners + // of a rectangle. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx + smallRect struct { + Left, Top, Right, Bottom int16 + } + + // Defines the coordinates of a character cell in a console screen + // buffer. The origin of the coordinate system (0,0) is at the top, left cell + // of the buffer. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx + coordinates struct { + X, Y int16 + } + + word int16 + + // Contains information about a console screen buffer. + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx + consoleScreenBufferInfo struct { + dwSize coordinates + dwCursorPosition coordinates + wAttributes word + srWindow smallRect + dwMaximumWindowSize coordinates + } +) + +// terminalWidth returns width of the terminal. 
+func terminalWidth() (width int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, error(e) + } + return int(info.dwSize.X) - 1, nil +} + +func getCursorPos() (pos coordinates, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return info.dwCursorPosition, error(e) + } + return info.dwCursorPosition, nil +} + +func setCursorPos(pos coordinates) error { + _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0) + if e != 0 { + return error(e) + } + return nil +} + +var ErrPoolWasStarted = errors.New("Bar pool was started") + +var echoLocked bool +var echoLockMutex sync.Mutex + +var oldState word + +func lockEcho() (quit chan int, err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if echoLocked { + err = ErrPoolWasStarted + return + } + echoLocked = true + + if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + const ENABLE_ECHO_INPUT = 0x0004 + const ENABLE_LINE_INPUT = 0x0002 + newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + return +} + +func unlockEcho() (err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if !echoLocked { + return + } + echoLocked = false + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 { + err 
= fmt.Errorf("Can't set terminal settings") + } + return +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go new file mode 100644 index 000000000..12e6fe6e2 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go @@ -0,0 +1,110 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly +// +build !appengine + +package pb + +import ( + "errors" + "fmt" + "os" + "os/signal" + "runtime" + "sync" + "syscall" + "unsafe" +) + +const ( + TIOCGWINSZ = 0x5413 + TIOCGWINSZ_OSX = 1074295912 +) + +var tty *os.File + +var ErrPoolWasStarted = errors.New("Bar pool was started") + +var echoLocked bool +var echoLockMutex sync.Mutex + +func init() { + var err error + tty, err = os.Open("/dev/tty") + if err != nil { + tty = os.Stdin + } +} + +// terminalWidth returns width of the terminal. +func terminalWidth() (int, error) { + w := new(window) + tio := syscall.TIOCGWINSZ + if runtime.GOOS == "darwin" { + tio = TIOCGWINSZ_OSX + } + res, _, err := syscall.Syscall(sysIoctl, + tty.Fd(), + uintptr(tio), + uintptr(unsafe.Pointer(w)), + ) + if int(res) == -1 { + return 0, err + } + return int(w.Col), nil +} + +var oldState syscall.Termios + +func lockEcho() (quit chan int, err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if echoLocked { + err = ErrPoolWasStarted + return + } + echoLocked = true + + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + quit = make(chan int, 1) + go catchTerminate(quit) + return +} + +func unlockEcho() 
(err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if !echoLocked { + return + } + echoLocked = false + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings") + } + return +} + +// listen exit signals and restore terminal state +func catchTerminate(quit chan int) { + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) + defer signal.Stop(sig) + select { + case <-quit: + unlockEcho() + case <-sig: + unlockEcho() + } +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go new file mode 100644 index 000000000..0b4a4afa8 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go @@ -0,0 +1,76 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows + +package pb + +import ( + "sync" + "time" +) + +// Create and start new pool with given bars +// You need call pool.Stop() after work +func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { + pool = new(Pool) + if err = pool.start(); err != nil { + return + } + pool.Add(pbs...) + return +} + +type Pool struct { + RefreshRate time.Duration + bars []*ProgressBar + quit chan int + finishOnce sync.Once +} + +// Add progress bars. 
+func (p *Pool) Add(pbs ...*ProgressBar) { + for _, bar := range pbs { + bar.ManualUpdate = true + bar.NotPrint = true + bar.Start() + p.bars = append(p.bars, bar) + } +} + +func (p *Pool) start() (err error) { + p.RefreshRate = DefaultRefreshRate + quit, err := lockEcho() + if err != nil { + return + } + p.quit = make(chan int) + go p.writer(quit) + return +} + +func (p *Pool) writer(finish chan int) { + var first = true + for { + select { + case <-time.After(p.RefreshRate): + if p.print(first) { + p.print(false) + finish <- 1 + return + } + first = false + case <-p.quit: + finish <- 1 + return + } + } +} + +// Restore terminal state and close pool +func (p *Pool) Stop() error { + // Wait until one final refresh has passed. + time.Sleep(p.RefreshRate) + + p.finishOnce.Do(func() { + close(p.quit) + }) + return unlockEcho() +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go new file mode 100644 index 000000000..d7a5ace41 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go @@ -0,0 +1,35 @@ +// +build windows + +package pb + +import ( + "fmt" + "log" +) + +func (p *Pool) print(first bool) bool { + var out string + if !first { + coords, err := getCursorPos() + if err != nil { + log.Panic(err) + } + coords.Y -= int16(len(p.bars)) + coords.X = 0 + + err = setCursorPos(coords) + if err != nil { + log.Panic(err) + } + } + isFinished := true + for _, bar := range p.bars { + if !bar.isFinish { + isFinished = false + } + bar.Update() + out += fmt.Sprintf("\r%s\n", bar.String()) + } + fmt.Print(out) + return isFinished +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go new file mode 100644 index 000000000..d95b71d87 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go @@ -0,0 +1,22 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly + +package pb + +import "fmt" + +func (p *Pool) print(first bool) bool { + var out string + if !first { + out = 
fmt.Sprintf("\033[%dA", len(p.bars)) + } + isFinished := true + for _, bar := range p.bars { + if !bar.isFinish { + isFinished = false + } + bar.Update() + out += fmt.Sprintf("\r%s\n", bar.String()) + } + fmt.Print(out) + return isFinished +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go b/vendor/gopkg.in/cheggaaa/pb.v1/reader.go new file mode 100644 index 000000000..9f3148b54 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/reader.go @@ -0,0 +1,25 @@ +package pb + +import ( + "io" +) + +// It's proxy reader, implement io.Reader +type Reader struct { + io.Reader + bar *ProgressBar +} + +func (r *Reader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.bar.Add(n) + return +} + +// Close the reader when it implements io.Closer +func (r *Reader) Close() (err error) { + if closer, ok := r.Reader.(io.Closer); ok { + return closer.Close() + } + return +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go new file mode 100644 index 000000000..d52edd365 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go @@ -0,0 +1,17 @@ +package pb + +import ( + "github.com/mattn/go-runewidth" + "regexp" +) + +// Finds the control character sequences (like colors) +var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") + +func escapeAwareRuneCountInString(s string) int { + n := runewidth.StringWidth(s) + for _, sm := range ctrlFinder.FindAllString(s, -1) { + n -= len(sm) + } + return n +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go new file mode 100644 index 000000000..517ea8ed7 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd netbsd openbsd dragonfly +// +build !appengine + +package pb + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go 
b/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go new file mode 100644 index 000000000..ebb3fe87c --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go @@ -0,0 +1,7 @@ +// +build linux solaris +// +build !appengine + +package pb + +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/vendor.json b/vendor/vendor.json index d4d157eb9..eda4727e4 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1480,6 +1480,22 @@ "checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=", "path": "google.golang.org/cloud/internal", "revision": "5a3b06f8b5da3b7c3a93da43163b872c86c509ef" + }, + { + "checksumSHA1": "w3z2xM4+RT/OWvVusDgNXZZEEkE=", + "path": "gopkg.in/cheggaaa/pb.v1", + "revision": "d7e6ca3010b6f084d8056847f55d7f572f180678", + "revisionTime": "2016-11-21T09:29:06Z" + }, + { + "checksumSHA1": "gY2M/3SCxqgrPz+P/N6JQ+m/wnA=", + "path": "gopkg.in/xmlpath.v2", + "revision": "860cbeca3ebcc600db0b213c0e83ad6ce91f5739" + }, + { + "checksumSHA1": "0dDWcU08d0FS4d99NCgCkGLjjqw=", + "path": "gopkg.in/xmlpath.v2/cmd/webpath", + "revision": "860cbeca3ebcc600db0b213c0e83ad6ce91f5739" } ], "rootPath": "github.com/hashicorp/packer" From e42a23ecb5ec15489277f8208b2b76def45a65a9 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 1 Mar 2017 14:45:49 -0600 Subject: [PATCH 042/251] Ugh..added dependency for gopkg.in/cheggaaa/pb.v1: github.com/matn/go-runewidth. Also added github.com/jlaffaye/ftp for ftp support. 
--- vendor/github.com/jlaffaye/ftp/LICENSE | 13 + vendor/github.com/jlaffaye/ftp/README.md | 17 + vendor/github.com/jlaffaye/ftp/ftp.go | 671 +++++++++ vendor/github.com/jlaffaye/ftp/status.go | 106 ++ vendor/github.com/mattn/go-runewidth/LICENSE | 21 + .../github.com/mattn/go-runewidth/README.mkd | 27 + .../mattn/go-runewidth/runewidth.go | 1223 +++++++++++++++++ .../mattn/go-runewidth/runewidth_js.go | 8 + .../mattn/go-runewidth/runewidth_posix.go | 77 ++ .../mattn/go-runewidth/runewidth_windows.go | 25 + vendor/vendor.json | 12 + 11 files changed, 2200 insertions(+) create mode 100644 vendor/github.com/jlaffaye/ftp/LICENSE create mode 100644 vendor/github.com/jlaffaye/ftp/README.md create mode 100644 vendor/github.com/jlaffaye/ftp/ftp.go create mode 100644 vendor/github.com/jlaffaye/ftp/status.go create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE create mode 100644 vendor/github.com/mattn/go-runewidth/README.mkd create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go diff --git a/vendor/github.com/jlaffaye/ftp/LICENSE b/vendor/github.com/jlaffaye/ftp/LICENSE new file mode 100644 index 000000000..9ab085c51 --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2011-2013, Julien Laffaye + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/jlaffaye/ftp/README.md b/vendor/github.com/jlaffaye/ftp/README.md new file mode 100644 index 000000000..b711be7ad --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/README.md @@ -0,0 +1,17 @@ +# goftp # + +[![Build Status](https://travis-ci.org/jlaffaye/ftp.svg?branch=master)](https://travis-ci.org/jlaffaye/ftp) +[![Coverage Status](https://coveralls.io/repos/jlaffaye/ftp/badge.svg?branch=master&service=github)](https://coveralls.io/github/jlaffaye/ftp?branch=master) +[![Go ReportCard](http://goreportcard.com/badge/jlaffaye/ftp)](http://goreportcard.com/report/jlaffaye/ftp) + +A FTP client package for Go + +## Install ## + +``` +go get -u github.com/jlaffaye/ftp +``` + +## Documentation ## + +http://godoc.org/github.com/jlaffaye/ftp diff --git a/vendor/github.com/jlaffaye/ftp/ftp.go b/vendor/github.com/jlaffaye/ftp/ftp.go new file mode 100644 index 000000000..db7ac9277 --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/ftp.go @@ -0,0 +1,671 @@ +// Package ftp implements a FTP client as described in RFC 959. +package ftp + +import ( + "bufio" + "errors" + "io" + "net" + "net/textproto" + "strconv" + "strings" + "time" +) + +// EntryType describes the different types of an Entry. +type EntryType int + +// The differents types of an Entry +const ( + EntryTypeFile EntryType = iota + EntryTypeFolder + EntryTypeLink +) + +// ServerConn represents the connection to a remote FTP server. +type ServerConn struct { + conn *textproto.Conn + host string + timeout time.Duration + features map[string]string +} + +// Entry describes a file and is returned by List(). 
+type Entry struct { + Name string + Type EntryType + Size uint64 + Time time.Time +} + +// response represent a data-connection +type response struct { + conn net.Conn + c *ServerConn +} + +// Connect is an alias to Dial, for backward compatibility +func Connect(addr string) (*ServerConn, error) { + return Dial(addr) +} + +// Dial is like DialTimeout with no timeout +func Dial(addr string) (*ServerConn, error) { + return DialTimeout(addr, 0) +} + +// DialTimeout initializes the connection to the specified ftp server address. +// +// It is generally followed by a call to Login() as most FTP commands require +// an authenticated user. +func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) { + tconn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return nil, err + } + + // Use the resolved IP address in case addr contains a domain name + // If we use the domain name, we might not resolve to the same IP. + remoteAddr := tconn.RemoteAddr().String() + host, _, err := net.SplitHostPort(remoteAddr) + if err != nil { + return nil, err + } + + conn := textproto.NewConn(tconn) + + c := &ServerConn{ + conn: conn, + host: host, + timeout: timeout, + features: make(map[string]string), + } + + _, _, err = c.conn.ReadResponse(StatusReady) + if err != nil { + c.Quit() + return nil, err + } + + err = c.feat() + if err != nil { + c.Quit() + return nil, err + } + + return c, nil +} + +// Login authenticates the client with specified user and password. +// +// "anonymous"/"anonymous" is a common user/password scheme for FTP servers +// that allows anonymous read-only accounts. 
+func (c *ServerConn) Login(user, password string) error { + code, message, err := c.cmd(-1, "USER %s", user) + if err != nil { + return err + } + + switch code { + case StatusLoggedIn: + case StatusUserOK: + _, _, err = c.cmd(StatusLoggedIn, "PASS %s", password) + if err != nil { + return err + } + default: + return errors.New(message) + } + + // Switch to binary mode + _, _, err = c.cmd(StatusCommandOK, "TYPE I") + if err != nil { + return err + } + + return nil +} + +// feat issues a FEAT FTP command to list the additional commands supported by +// the remote FTP server. +// FEAT is described in RFC 2389 +func (c *ServerConn) feat() error { + code, message, err := c.cmd(-1, "FEAT") + if err != nil { + return err + } + + if code != StatusSystem { + // The server does not support the FEAT command. This is not an + // error: we consider that there is no additional feature. + return nil + } + + lines := strings.Split(message, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + continue + } + + line = strings.TrimSpace(line) + featureElements := strings.SplitN(line, " ", 2) + + command := featureElements[0] + + var commandDesc string + if len(featureElements) == 2 { + commandDesc = featureElements[1] + } + + c.features[command] = commandDesc + } + + return nil +} + +// epsv issues an "EPSV" command to get a port number for a data connection. +func (c *ServerConn) epsv() (port int, err error) { + _, line, err := c.cmd(StatusExtendedPassiveMode, "EPSV") + if err != nil { + return + } + + start := strings.Index(line, "|||") + end := strings.LastIndex(line, "|") + if start == -1 || end == -1 { + err = errors.New("Invalid EPSV response format") + return + } + port, err = strconv.Atoi(line[start+3 : end]) + return +} + +// pasv issues a "PASV" command to get a port number for a data connection. 
+func (c *ServerConn) pasv() (port int, err error) { + _, line, err := c.cmd(StatusPassiveMode, "PASV") + if err != nil { + return + } + + // PASV response format : 227 Entering Passive Mode (h1,h2,h3,h4,p1,p2). + start := strings.Index(line, "(") + end := strings.LastIndex(line, ")") + if start == -1 || end == -1 { + return 0, errors.New("Invalid PASV response format") + } + + // We have to split the response string + pasvData := strings.Split(line[start+1:end], ",") + + if len(pasvData) < 6 { + return 0, errors.New("Invalid PASV response format") + } + + // Let's compute the port number + portPart1, err1 := strconv.Atoi(pasvData[4]) + if err1 != nil { + err = err1 + return + } + + portPart2, err2 := strconv.Atoi(pasvData[5]) + if err2 != nil { + err = err2 + return + } + + // Recompose port + port = portPart1*256 + portPart2 + return +} + +// openDataConn creates a new FTP data connection. +func (c *ServerConn) openDataConn() (net.Conn, error) { + var ( + port int + err error + ) + + if port, err = c.epsv(); err != nil { + if port, err = c.pasv(); err != nil { + return nil, err + } + } + + return net.DialTimeout("tcp", net.JoinHostPort(c.host, strconv.Itoa(port)), c.timeout) +} + +// cmd is a helper function to execute a command and check for the expected FTP +// return code +func (c *ServerConn) cmd(expected int, format string, args ...interface{}) (int, string, error) { + _, err := c.conn.Cmd(format, args...) + if err != nil { + return 0, "", err + } + + return c.conn.ReadResponse(expected) +} + +// cmdDataConnFrom executes a command which require a FTP data connection. +// Issues a REST FTP command to specify the number of bytes to skip for the transfer. 
+func (c *ServerConn) cmdDataConnFrom(offset uint64, format string, args ...interface{}) (net.Conn, error) { + conn, err := c.openDataConn() + if err != nil { + return nil, err + } + + if offset != 0 { + _, _, err := c.cmd(StatusRequestFilePending, "REST %d", offset) + if err != nil { + return nil, err + } + } + + _, err = c.conn.Cmd(format, args...) + if err != nil { + conn.Close() + return nil, err + } + + code, msg, err := c.conn.ReadResponse(-1) + if err != nil { + conn.Close() + return nil, err + } + if code != StatusAlreadyOpen && code != StatusAboutToSend { + conn.Close() + return nil, &textproto.Error{Code: code, Msg: msg} + } + + return conn, nil +} + +var errUnsupportedListLine = errors.New("Unsupported LIST line") + +// parseRFC3659ListLine parses the style of directory line defined in RFC 3659. +func parseRFC3659ListLine(line string) (*Entry, error) { + iSemicolon := strings.Index(line, ";") + iWhitespace := strings.Index(line, " ") + + if iSemicolon < 0 || iSemicolon > iWhitespace { + return nil, errUnsupportedListLine + } + + e := &Entry{ + Name: line[iWhitespace+1:], + } + + for _, field := range strings.Split(line[:iWhitespace-1], ";") { + i := strings.Index(field, "=") + if i < 1 { + return nil, errUnsupportedListLine + } + + key := field[:i] + value := field[i+1:] + + switch key { + case "modify": + var err error + e.Time, err = time.Parse("20060102150405", value) + if err != nil { + return nil, err + } + case "type": + switch value { + case "dir", "cdir", "pdir": + e.Type = EntryTypeFolder + case "file": + e.Type = EntryTypeFile + } + case "size": + e.setSize(value) + } + } + return e, nil +} + +// parseLsListLine parses a directory line in a format based on the output of +// the UNIX ls command. 
+func parseLsListLine(line string) (*Entry, error) { + fields := strings.Fields(line) + if len(fields) >= 7 && fields[1] == "folder" && fields[2] == "0" { + e := &Entry{ + Type: EntryTypeFolder, + Name: strings.Join(fields[6:], " "), + } + if err := e.setTime(fields[3:6]); err != nil { + return nil, err + } + + return e, nil + } + + if len(fields) < 8 { + return nil, errUnsupportedListLine + } + + if fields[1] == "0" { + e := &Entry{ + Type: EntryTypeFile, + Name: strings.Join(fields[7:], " "), + } + + if err := e.setSize(fields[2]); err != nil { + return nil, err + } + if err := e.setTime(fields[4:7]); err != nil { + return nil, err + } + + return e, nil + } + + if len(fields) < 9 { + return nil, errUnsupportedListLine + } + + e := &Entry{} + switch fields[0][0] { + case '-': + e.Type = EntryTypeFile + if err := e.setSize(fields[4]); err != nil { + return nil, err + } + case 'd': + e.Type = EntryTypeFolder + case 'l': + e.Type = EntryTypeLink + default: + return nil, errors.New("Unknown entry type") + } + + if err := e.setTime(fields[5:8]); err != nil { + return nil, err + } + + e.Name = strings.Join(fields[8:], " ") + return e, nil +} + +var dirTimeFormats = []string{ + "01-02-06 03:04PM", + "2006-01-02 15:04", +} + +// parseDirListLine parses a directory line in a format based on the output of +// the MS-DOS DIR command. +func parseDirListLine(line string) (*Entry, error) { + e := &Entry{} + var err error + + // Try various time formats that DIR might use, and stop when one works. + for _, format := range dirTimeFormats { + if len(line) > len(format) { + e.Time, err = time.Parse(format, line[:len(format)]) + if err == nil { + line = line[len(format):] + break + } + } + } + if err != nil { + // None of the time formats worked. 
+ return nil, errUnsupportedListLine + } + + line = strings.TrimLeft(line, " ") + if strings.HasPrefix(line, "") { + e.Type = EntryTypeFolder + line = strings.TrimPrefix(line, "") + } else { + space := strings.Index(line, " ") + if space == -1 { + return nil, errUnsupportedListLine + } + e.Size, err = strconv.ParseUint(line[:space], 10, 64) + if err != nil { + return nil, errUnsupportedListLine + } + e.Type = EntryTypeFile + line = line[space:] + } + + e.Name = strings.TrimLeft(line, " ") + return e, nil +} + +var listLineParsers = []func(line string) (*Entry, error){ + parseRFC3659ListLine, + parseLsListLine, + parseDirListLine, +} + +// parseListLine parses the various non-standard format returned by the LIST +// FTP command. +func parseListLine(line string) (*Entry, error) { + for _, f := range listLineParsers { + e, err := f(line) + if err == errUnsupportedListLine { + // Try another format. + continue + } + return e, err + } + return nil, errUnsupportedListLine +} + +func (e *Entry) setSize(str string) (err error) { + e.Size, err = strconv.ParseUint(str, 0, 64) + return +} + +func (e *Entry) setTime(fields []string) (err error) { + var timeStr string + if strings.Contains(fields[2], ":") { // this year + thisYear, _, _ := time.Now().Date() + timeStr = fields[1] + " " + fields[0] + " " + strconv.Itoa(thisYear)[2:4] + " " + fields[2] + " GMT" + } else { // not this year + if len(fields[2]) != 4 { + return errors.New("Invalid year format in time string") + } + timeStr = fields[1] + " " + fields[0] + " " + fields[2][2:4] + " 00:00 GMT" + } + e.Time, err = time.Parse("_2 Jan 06 15:04 MST", timeStr) + return +} + +// NameList issues an NLST FTP command. 
+func (c *ServerConn) NameList(path string) (entries []string, err error) { + conn, err := c.cmdDataConnFrom(0, "NLST %s", path) + if err != nil { + return + } + + r := &response{conn, c} + defer r.Close() + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + entries = append(entries, scanner.Text()) + } + if err = scanner.Err(); err != nil { + return entries, err + } + return +} + +// List issues a LIST FTP command. +func (c *ServerConn) List(path string) (entries []*Entry, err error) { + conn, err := c.cmdDataConnFrom(0, "LIST %s", path) + if err != nil { + return + } + + r := &response{conn, c} + defer r.Close() + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + entry, err := parseListLine(line) + if err == nil { + entries = append(entries, entry) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return +} + +// ChangeDir issues a CWD FTP command, which changes the current directory to +// the specified path. +func (c *ServerConn) ChangeDir(path string) error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "CWD %s", path) + return err +} + +// ChangeDirToParent issues a CDUP FTP command, which changes the current +// directory to the parent directory. This is similar to a call to ChangeDir +// with a path set to "..". +func (c *ServerConn) ChangeDirToParent() error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "CDUP") + return err +} + +// CurrentDir issues a PWD FTP command, which Returns the path of the current +// directory. +func (c *ServerConn) CurrentDir() (string, error) { + _, msg, err := c.cmd(StatusPathCreated, "PWD") + if err != nil { + return "", err + } + + start := strings.Index(msg, "\"") + end := strings.LastIndex(msg, "\"") + + if start == -1 || end == -1 { + return "", errors.New("Unsuported PWD response format") + } + + return msg[start+1 : end], nil +} + +// Retr issues a RETR FTP command to fetch the specified file from the remote +// FTP server. 
+// +// The returned ReadCloser must be closed to cleanup the FTP data connection. +func (c *ServerConn) Retr(path string) (io.ReadCloser, error) { + return c.RetrFrom(path, 0) +} + +// RetrFrom issues a RETR FTP command to fetch the specified file from the remote +// FTP server, the server will not send the offset first bytes of the file. +// +// The returned ReadCloser must be closed to cleanup the FTP data connection. +func (c *ServerConn) RetrFrom(path string, offset uint64) (io.ReadCloser, error) { + conn, err := c.cmdDataConnFrom(offset, "RETR %s", path) + if err != nil { + return nil, err + } + + return &response{conn, c}, nil +} + +// Stor issues a STOR FTP command to store a file to the remote FTP server. +// Stor creates the specified file with the content of the io.Reader. +// +// Hint: io.Pipe() can be used if an io.Writer is required. +func (c *ServerConn) Stor(path string, r io.Reader) error { + return c.StorFrom(path, r, 0) +} + +// StorFrom issues a STOR FTP command to store a file to the remote FTP server. +// Stor creates the specified file with the content of the io.Reader, writing +// on the server will start at the given file offset. +// +// Hint: io.Pipe() can be used if an io.Writer is required. +func (c *ServerConn) StorFrom(path string, r io.Reader, offset uint64) error { + conn, err := c.cmdDataConnFrom(offset, "STOR %s", path) + if err != nil { + return err + } + + _, err = io.Copy(conn, r) + conn.Close() + if err != nil { + return err + } + + _, _, err = c.conn.ReadResponse(StatusClosingDataConnection) + return err +} + +// Rename renames a file on the remote FTP server. +func (c *ServerConn) Rename(from, to string) error { + _, _, err := c.cmd(StatusRequestFilePending, "RNFR %s", from) + if err != nil { + return err + } + + _, _, err = c.cmd(StatusRequestedFileActionOK, "RNTO %s", to) + return err +} + +// Delete issues a DELE FTP command to delete the specified file from the +// remote FTP server. 
+func (c *ServerConn) Delete(path string) error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "DELE %s", path) + return err +} + +// MakeDir issues a MKD FTP command to create the specified directory on the +// remote FTP server. +func (c *ServerConn) MakeDir(path string) error { + _, _, err := c.cmd(StatusPathCreated, "MKD %s", path) + return err +} + +// RemoveDir issues a RMD FTP command to remove the specified directory from +// the remote FTP server. +func (c *ServerConn) RemoveDir(path string) error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "RMD %s", path) + return err +} + +// NoOp issues a NOOP FTP command. +// NOOP has no effects and is usually used to prevent the remote FTP server to +// close the otherwise idle connection. +func (c *ServerConn) NoOp() error { + _, _, err := c.cmd(StatusCommandOK, "NOOP") + return err +} + +// Logout issues a REIN FTP command to logout the current user. +func (c *ServerConn) Logout() error { + _, _, err := c.cmd(StatusReady, "REIN") + return err +} + +// Quit issues a QUIT FTP command to properly close the connection from the +// remote FTP server. +func (c *ServerConn) Quit() error { + c.conn.Cmd("QUIT") + return c.conn.Close() +} + +// Read implements the io.Reader interface on a FTP data connection. +func (r *response) Read(buf []byte) (int, error) { + return r.conn.Read(buf) +} + +// Close implements the io.Closer interface on a FTP data connection. 
+func (r *response) Close() error { + err := r.conn.Close() + _, _, err2 := r.c.conn.ReadResponse(StatusClosingDataConnection) + if err2 != nil { + err = err2 + } + return err +} diff --git a/vendor/github.com/jlaffaye/ftp/status.go b/vendor/github.com/jlaffaye/ftp/status.go new file mode 100644 index 000000000..e90ca6211 --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/status.go @@ -0,0 +1,106 @@ +package ftp + +// FTP status codes, defined in RFC 959 +const ( + StatusInitiating = 100 + StatusRestartMarker = 110 + StatusReadyMinute = 120 + StatusAlreadyOpen = 125 + StatusAboutToSend = 150 + + StatusCommandOK = 200 + StatusCommandNotImplemented = 202 + StatusSystem = 211 + StatusDirectory = 212 + StatusFile = 213 + StatusHelp = 214 + StatusName = 215 + StatusReady = 220 + StatusClosing = 221 + StatusDataConnectionOpen = 225 + StatusClosingDataConnection = 226 + StatusPassiveMode = 227 + StatusLongPassiveMode = 228 + StatusExtendedPassiveMode = 229 + StatusLoggedIn = 230 + StatusLoggedOut = 231 + StatusLogoutAck = 232 + StatusRequestedFileActionOK = 250 + StatusPathCreated = 257 + + StatusUserOK = 331 + StatusLoginNeedAccount = 332 + StatusRequestFilePending = 350 + + StatusNotAvailable = 421 + StatusCanNotOpenDataConnection = 425 + StatusTransfertAborted = 426 + StatusInvalidCredentials = 430 + StatusHostUnavailable = 434 + StatusFileActionIgnored = 450 + StatusActionAborted = 451 + Status452 = 452 + + StatusBadCommand = 500 + StatusBadArguments = 501 + StatusNotImplemented = 502 + StatusBadSequence = 503 + StatusNotImplementedParameter = 504 + StatusNotLoggedIn = 530 + StatusStorNeedAccount = 532 + StatusFileUnavailable = 550 + StatusPageTypeUnknown = 551 + StatusExceededStorage = 552 + StatusBadFileName = 553 +) + +var statusText = map[int]string{ + // 200 + StatusCommandOK: "Command okay.", + StatusCommandNotImplemented: "Command not implemented, superfluous at this site.", + StatusSystem: "System status, or system help reply.", + StatusDirectory: "Directory 
status.", + StatusFile: "File status.", + StatusHelp: "Help message.", + StatusName: "", + StatusReady: "Service ready for new user.", + StatusClosing: "Service closing control connection.", + StatusDataConnectionOpen: "Data connection open; no transfer in progress.", + StatusClosingDataConnection: "Closing data connection. Requested file action successful.", + StatusPassiveMode: "Entering Passive Mode.", + StatusLongPassiveMode: "Entering Long Passive Mode.", + StatusExtendedPassiveMode: "Entering Extended Passive Mode.", + StatusLoggedIn: "User logged in, proceed.", + StatusLoggedOut: "User logged out; service terminated.", + StatusLogoutAck: "Logout command noted, will complete when transfer done.", + StatusRequestedFileActionOK: "Requested file action okay, completed.", + StatusPathCreated: "Path created.", + + // 300 + StatusUserOK: "User name okay, need password.", + StatusLoginNeedAccount: "Need account for login.", + StatusRequestFilePending: "Requested file action pending further information.", + + // 400 + StatusNotAvailable: "Service not available, closing control connection.", + StatusCanNotOpenDataConnection: "Can't open data connection.", + StatusTransfertAborted: "Connection closed; transfer aborted.", + StatusInvalidCredentials: "Invalid username or password.", + StatusHostUnavailable: "Requested host unavailable.", + StatusFileActionIgnored: "Requested file action not taken.", + StatusActionAborted: "Requested action aborted. 
Local error in processing.", + Status452: "Insufficient storage space in system.", + + // 500 + StatusBadCommand: "Command unrecognized.", + StatusBadArguments: "Syntax error in parameters or arguments.", + StatusNotImplemented: "Command not implemented.", + StatusBadSequence: "Bad sequence of commands.", + StatusNotImplementedParameter: "Command not implemented for that parameter.", + StatusNotLoggedIn: "Not logged in.", + StatusStorNeedAccount: "Need account for storing files.", + StatusFileUnavailable: "File unavailable.", + StatusPageTypeUnknown: "Page type unknown.", + StatusExceededStorage: "Exceeded storage allocation.", + StatusBadFileName: "File name not allowed.", +} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd new file mode 100644 index 000000000..66663a94b --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/README.mkd @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. 
+ +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 000000000..2164497ad --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,1223 @@ +package runewidth + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth = IsEastAsian() + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{EastAsianWidth} +) + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + // func (t table) IncludesRune(r rune) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + top) / 2 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, + {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, + {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, + {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, + {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, + {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, + {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 
0x0827}, + {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, + {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, + {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, + {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, + {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, + {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, + {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, + {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, + {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, + {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, + {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, + {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, + {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, + {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, + {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, + {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, + {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, + {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, + {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, + {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, + {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, + {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, + {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, + {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, + {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, + {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, + {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, + {0x1A55, 0x1A5E}, {0x1A60, 
0x1A7C}, {0x1A7F, 0x1A7F}, + {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, + {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, + {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, + {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, + {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, + {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, + {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, + {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, + {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, + {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, + {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, + {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, + {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, + {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, + {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, + {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, + {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, + {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, + {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, + {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, + {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, + {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, + {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, + {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, + {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, + {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, + {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, + {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, + {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, + {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, + {0x16F8F, 0x16F92}, {0x1BC9D, 
0x1BC9E}, {0x1D165, 0x1D169}, + {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, + {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, + {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, + {0xE0100, 0xE01EF}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA}, + {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, + {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, + {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, + {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, + {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, + {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, + {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, + {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, + {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 
0x1F3F4}, + {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, + {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, + {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, + {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, + {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, + {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, + {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, + {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, + {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, 
{0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} + +var emoji = table{ + {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F321}, 
{0x1F324, 0x1F32C}, + {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, {0x1F396, 0x1F397}, + {0x1F399, 0x1F39B}, {0x1F39E, 0x1F39F}, {0x1F3CB, 0x1F3CE}, + {0x1F3D4, 0x1F3DF}, {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3F7}, + {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FD}, + {0x1F549, 0x1F54A}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F579}, + {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, + {0x1F5A5, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, + {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, + {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, + {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, + {0x1F5FA, 0x1F5FA}, {0x1F6CB, 0x1F6CF}, {0x1F6E0, 0x1F6E5}, + {0x1F6E9, 0x1F6E9}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F3}, +} + +var notassigned = table{ + {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, + {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, + {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, + {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, + {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, + {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF}, + {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F}, + {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, + {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, + {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, + {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, + {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, + {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, + {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, + {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, + {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, + {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, + {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, + {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, + {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, + {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, + {0x0ACA, 0x0ACA}, {0x0ACE, 
0x0ACF}, {0x0AD1, 0x0ADF}, + {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, + {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, + {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, + {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, + {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, + {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, + {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98}, + {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, + {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, + {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, + {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, + {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, + {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, + {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, + {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, + {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, + {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, + {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, + {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, + {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, + {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, + {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, + {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, + {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, + {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, + {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, + {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, + {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, + {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, + {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, + {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, + {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, + {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, + {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, + {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, + {0x0FDB, 
0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, + {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, + {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, + {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, + {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, + {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, + {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, + {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, + {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D}, + {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, + {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, + {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, + {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, + {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, + {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, + {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, + {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, + {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, + {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, + {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, + {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, + {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, + {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, + {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, + {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, + {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, + {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, + {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, + {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, + {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, + {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, + {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, + {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, + {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, + {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, + {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 
0x2D9F}, + {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, + {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, + {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, + {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, + {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, + {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, + {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, + {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, + {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, + {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, + {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, + {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, + {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, + {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, + {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, + {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, + {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, + {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, + {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, + {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, + {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, + {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, + {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, + {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, + {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, + {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, + {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, + {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, + {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, + {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, + {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, + {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, + {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, + {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, + {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 
0x1037F}, + {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, + {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, + {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, + {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, + {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809}, + {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, + {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, + {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, + {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, + {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, + {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, + {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, + {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, + {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, + {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, + {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, + {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, + {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, + {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, + {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, + {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, + {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, + {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, + {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, + {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, + {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, + {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, + {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, + {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, + {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, + {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, + {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, + 
{0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, + {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, + {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, + {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, + {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, + {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, + {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, + {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, + {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, + {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, + {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, + {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, + {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, + {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, + {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, + {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, + {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, + {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, + {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, + {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, + {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, + {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, + {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, + {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, + {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, + {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, + {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, + {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, + {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, + {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, + {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, + {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, + {0x1EE53, 
0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, + {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, + {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, + {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, + {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, + {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, + {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, + {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, + {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, + {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, + {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, + {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, + {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, + {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, + {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, + {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, + {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, + {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F}, + {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, + {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, + {0xFFFFE, 0xFFFFF}, +} + +var neutral = table{ + {0x0000, 0x001F}, {0x007F, 0x007F}, {0x0080, 0x009F}, + {0x00A0, 0x00A0}, {0x00A9, 0x00A9}, {0x00AB, 0x00AB}, + {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, {0x00C0, 0x00C5}, + {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, {0x00D9, 0x00DD}, + {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, {0x00EB, 0x00EB}, + {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, {0x00F4, 0x00F6}, + {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, {0x00FF, 0x00FF}, + {0x0100, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x017F}, {0x0180, 0x01BA}, {0x01BB, 
0x01BB}, + {0x01BC, 0x01BF}, {0x01C0, 0x01C3}, {0x01C4, 0x01CD}, + {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, {0x01D3, 0x01D3}, + {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, {0x01D9, 0x01D9}, + {0x01DB, 0x01DB}, {0x01DD, 0x024F}, {0x0250, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x0293}, {0x0294, 0x0294}, + {0x0295, 0x02AF}, {0x02B0, 0x02C1}, {0x02C2, 0x02C3}, + {0x02C5, 0x02C5}, {0x02C6, 0x02C6}, {0x02C8, 0x02C8}, + {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, {0x02D1, 0x02D1}, + {0x02D2, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02E4}, {0x02E5, 0x02EB}, {0x02EC, 0x02EC}, + {0x02ED, 0x02ED}, {0x02EE, 0x02EE}, {0x02EF, 0x02FF}, + {0x0370, 0x0373}, {0x0374, 0x0374}, {0x0375, 0x0375}, + {0x0376, 0x0377}, {0x037A, 0x037A}, {0x037B, 0x037D}, + {0x037E, 0x037E}, {0x037F, 0x037F}, {0x0384, 0x0385}, + {0x0386, 0x0386}, {0x0387, 0x0387}, {0x0388, 0x038A}, + {0x038C, 0x038C}, {0x038E, 0x0390}, {0x03AA, 0x03B0}, + {0x03C2, 0x03C2}, {0x03CA, 0x03F5}, {0x03F6, 0x03F6}, + {0x03F7, 0x03FF}, {0x0400, 0x0400}, {0x0402, 0x040F}, + {0x0450, 0x0450}, {0x0452, 0x0481}, {0x0482, 0x0482}, + {0x0483, 0x0487}, {0x0488, 0x0489}, {0x048A, 0x04FF}, + {0x0500, 0x052F}, {0x0531, 0x0556}, {0x0559, 0x0559}, + {0x055A, 0x055F}, {0x0561, 0x0587}, {0x0589, 0x0589}, + {0x058A, 0x058A}, {0x058D, 0x058E}, {0x058F, 0x058F}, + {0x0591, 0x05BD}, {0x05BE, 0x05BE}, {0x05BF, 0x05BF}, + {0x05C0, 0x05C0}, {0x05C1, 0x05C2}, {0x05C3, 0x05C3}, + {0x05C4, 0x05C5}, {0x05C6, 0x05C6}, {0x05C7, 0x05C7}, + {0x05D0, 0x05EA}, {0x05F0, 0x05F2}, {0x05F3, 0x05F4}, + {0x0600, 0x0605}, {0x0606, 0x0608}, {0x0609, 0x060A}, + {0x060B, 0x060B}, {0x060C, 0x060D}, {0x060E, 0x060F}, + {0x0610, 0x061A}, {0x061B, 0x061B}, {0x061C, 0x061C}, + {0x061E, 0x061F}, {0x0620, 0x063F}, {0x0640, 0x0640}, + {0x0641, 0x064A}, {0x064B, 0x065F}, {0x0660, 0x0669}, + {0x066A, 0x066D}, {0x066E, 0x066F}, {0x0670, 0x0670}, + {0x0671, 0x06D3}, {0x06D4, 0x06D4}, {0x06D5, 0x06D5}, + {0x06D6, 0x06DC}, {0x06DD, 0x06DD}, {0x06DE, 0x06DE}, + {0x06DF, 0x06E4}, {0x06E5, 
0x06E6}, {0x06E7, 0x06E8}, + {0x06E9, 0x06E9}, {0x06EA, 0x06ED}, {0x06EE, 0x06EF}, + {0x06F0, 0x06F9}, {0x06FA, 0x06FC}, {0x06FD, 0x06FE}, + {0x06FF, 0x06FF}, {0x0700, 0x070D}, {0x070F, 0x070F}, + {0x0710, 0x0710}, {0x0711, 0x0711}, {0x0712, 0x072F}, + {0x0730, 0x074A}, {0x074D, 0x074F}, {0x0750, 0x077F}, + {0x0780, 0x07A5}, {0x07A6, 0x07B0}, {0x07B1, 0x07B1}, + {0x07C0, 0x07C9}, {0x07CA, 0x07EA}, {0x07EB, 0x07F3}, + {0x07F4, 0x07F5}, {0x07F6, 0x07F6}, {0x07F7, 0x07F9}, + {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x0816, 0x0819}, + {0x081A, 0x081A}, {0x081B, 0x0823}, {0x0824, 0x0824}, + {0x0825, 0x0827}, {0x0828, 0x0828}, {0x0829, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x0858}, {0x0859, 0x085B}, + {0x085E, 0x085E}, {0x08A0, 0x08B4}, {0x08B6, 0x08BD}, + {0x08D4, 0x08E1}, {0x08E2, 0x08E2}, {0x08E3, 0x08FF}, + {0x0900, 0x0902}, {0x0903, 0x0903}, {0x0904, 0x0939}, + {0x093A, 0x093A}, {0x093B, 0x093B}, {0x093C, 0x093C}, + {0x093D, 0x093D}, {0x093E, 0x0940}, {0x0941, 0x0948}, + {0x0949, 0x094C}, {0x094D, 0x094D}, {0x094E, 0x094F}, + {0x0950, 0x0950}, {0x0951, 0x0957}, {0x0958, 0x0961}, + {0x0962, 0x0963}, {0x0964, 0x0965}, {0x0966, 0x096F}, + {0x0970, 0x0970}, {0x0971, 0x0971}, {0x0972, 0x097F}, + {0x0980, 0x0980}, {0x0981, 0x0981}, {0x0982, 0x0983}, + {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, + {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, + {0x09BC, 0x09BC}, {0x09BD, 0x09BD}, {0x09BE, 0x09C0}, + {0x09C1, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CC}, + {0x09CD, 0x09CD}, {0x09CE, 0x09CE}, {0x09D7, 0x09D7}, + {0x09DC, 0x09DD}, {0x09DF, 0x09E1}, {0x09E2, 0x09E3}, + {0x09E6, 0x09EF}, {0x09F0, 0x09F1}, {0x09F2, 0x09F3}, + {0x09F4, 0x09F9}, {0x09FA, 0x09FA}, {0x09FB, 0x09FB}, + {0x0A01, 0x0A02}, {0x0A03, 0x0A03}, {0x0A05, 0x0A0A}, + {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, + {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, + {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A40}, {0x0A41, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 
0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A6F}, + {0x0A70, 0x0A71}, {0x0A72, 0x0A74}, {0x0A75, 0x0A75}, + {0x0A81, 0x0A82}, {0x0A83, 0x0A83}, {0x0A85, 0x0A8D}, + {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, + {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0ABC}, + {0x0ABD, 0x0ABD}, {0x0ABE, 0x0AC0}, {0x0AC1, 0x0AC5}, + {0x0AC7, 0x0AC8}, {0x0AC9, 0x0AC9}, {0x0ACB, 0x0ACC}, + {0x0ACD, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE1}, + {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AF0, 0x0AF0}, + {0x0AF1, 0x0AF1}, {0x0AF9, 0x0AF9}, {0x0B01, 0x0B01}, + {0x0B02, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, + {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, + {0x0B35, 0x0B39}, {0x0B3C, 0x0B3C}, {0x0B3D, 0x0B3D}, + {0x0B3E, 0x0B3E}, {0x0B3F, 0x0B3F}, {0x0B40, 0x0B40}, + {0x0B41, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4C}, + {0x0B4D, 0x0B4D}, {0x0B56, 0x0B56}, {0x0B57, 0x0B57}, + {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B62, 0x0B63}, + {0x0B66, 0x0B6F}, {0x0B70, 0x0B70}, {0x0B71, 0x0B71}, + {0x0B72, 0x0B77}, {0x0B82, 0x0B82}, {0x0B83, 0x0B83}, + {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, + {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, + {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, + {0x0BBE, 0x0BBF}, {0x0BC0, 0x0BC0}, {0x0BC1, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCC}, {0x0BCD, 0x0BCD}, + {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, + {0x0BF0, 0x0BF2}, {0x0BF3, 0x0BF8}, {0x0BF9, 0x0BF9}, + {0x0BFA, 0x0BFA}, {0x0C00, 0x0C00}, {0x0C01, 0x0C03}, + {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, + {0x0C2A, 0x0C39}, {0x0C3D, 0x0C3D}, {0x0C3E, 0x0C40}, + {0x0C41, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C61}, + {0x0C62, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C78, 0x0C7E}, + {0x0C7F, 0x0C7F}, {0x0C80, 0x0C80}, {0x0C81, 0x0C81}, + {0x0C82, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CBC}, {0x0CBD, 0x0CBD}, {0x0CBE, 
0x0CBE}, + {0x0CBF, 0x0CBF}, {0x0CC0, 0x0CC4}, {0x0CC6, 0x0CC6}, + {0x0CC7, 0x0CC8}, {0x0CCA, 0x0CCB}, {0x0CCC, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE1}, + {0x0CE2, 0x0CE3}, {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, + {0x0D01, 0x0D01}, {0x0D02, 0x0D03}, {0x0D05, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D}, + {0x0D3E, 0x0D40}, {0x0D41, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4C}, {0x0D4D, 0x0D4D}, {0x0D4E, 0x0D4E}, + {0x0D4F, 0x0D4F}, {0x0D54, 0x0D56}, {0x0D57, 0x0D57}, + {0x0D58, 0x0D5E}, {0x0D5F, 0x0D61}, {0x0D62, 0x0D63}, + {0x0D66, 0x0D6F}, {0x0D70, 0x0D78}, {0x0D79, 0x0D79}, + {0x0D7A, 0x0D7F}, {0x0D82, 0x0D83}, {0x0D85, 0x0D96}, + {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, + {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD1}, + {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, + {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF3}, {0x0DF4, 0x0DF4}, + {0x0E01, 0x0E30}, {0x0E31, 0x0E31}, {0x0E32, 0x0E33}, + {0x0E34, 0x0E3A}, {0x0E3F, 0x0E3F}, {0x0E40, 0x0E45}, + {0x0E46, 0x0E46}, {0x0E47, 0x0E4E}, {0x0E4F, 0x0E4F}, + {0x0E50, 0x0E59}, {0x0E5A, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, + {0x0E8D, 0x0E8D}, {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, + {0x0EA1, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, + {0x0EAA, 0x0EAB}, {0x0EAD, 0x0EB0}, {0x0EB1, 0x0EB1}, + {0x0EB2, 0x0EB3}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, + {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, + {0x0F00, 0x0F00}, {0x0F01, 0x0F03}, {0x0F04, 0x0F12}, + {0x0F13, 0x0F13}, {0x0F14, 0x0F14}, {0x0F15, 0x0F17}, + {0x0F18, 0x0F19}, {0x0F1A, 0x0F1F}, {0x0F20, 0x0F29}, + {0x0F2A, 0x0F33}, {0x0F34, 0x0F34}, {0x0F35, 0x0F35}, + {0x0F36, 0x0F36}, {0x0F37, 0x0F37}, {0x0F38, 0x0F38}, + {0x0F39, 0x0F39}, {0x0F3A, 0x0F3A}, {0x0F3B, 0x0F3B}, + {0x0F3C, 0x0F3C}, {0x0F3D, 0x0F3D}, {0x0F3E, 0x0F3F}, + {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F7E}, + {0x0F7F, 0x0F7F}, {0x0F80, 
0x0F84}, {0x0F85, 0x0F85}, + {0x0F86, 0x0F87}, {0x0F88, 0x0F8C}, {0x0F8D, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FBE, 0x0FC5}, {0x0FC6, 0x0FC6}, + {0x0FC7, 0x0FCC}, {0x0FCE, 0x0FCF}, {0x0FD0, 0x0FD4}, + {0x0FD5, 0x0FD8}, {0x0FD9, 0x0FDA}, {0x1000, 0x102A}, + {0x102B, 0x102C}, {0x102D, 0x1030}, {0x1031, 0x1031}, + {0x1032, 0x1037}, {0x1038, 0x1038}, {0x1039, 0x103A}, + {0x103B, 0x103C}, {0x103D, 0x103E}, {0x103F, 0x103F}, + {0x1040, 0x1049}, {0x104A, 0x104F}, {0x1050, 0x1055}, + {0x1056, 0x1057}, {0x1058, 0x1059}, {0x105A, 0x105D}, + {0x105E, 0x1060}, {0x1061, 0x1061}, {0x1062, 0x1064}, + {0x1065, 0x1066}, {0x1067, 0x106D}, {0x106E, 0x1070}, + {0x1071, 0x1074}, {0x1075, 0x1081}, {0x1082, 0x1082}, + {0x1083, 0x1084}, {0x1085, 0x1086}, {0x1087, 0x108C}, + {0x108D, 0x108D}, {0x108E, 0x108E}, {0x108F, 0x108F}, + {0x1090, 0x1099}, {0x109A, 0x109C}, {0x109D, 0x109D}, + {0x109E, 0x109F}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FB, 0x10FB}, + {0x10FC, 0x10FC}, {0x10FD, 0x10FF}, {0x1160, 0x11FF}, + {0x1200, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, + {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, + {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, + {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, + {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, + {0x1318, 0x135A}, {0x135D, 0x135F}, {0x1360, 0x1368}, + {0x1369, 0x137C}, {0x1380, 0x138F}, {0x1390, 0x1399}, + {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x1400}, + {0x1401, 0x166C}, {0x166D, 0x166E}, {0x166F, 0x167F}, + {0x1680, 0x1680}, {0x1681, 0x169A}, {0x169B, 0x169B}, + {0x169C, 0x169C}, {0x16A0, 0x16EA}, {0x16EB, 0x16ED}, + {0x16EE, 0x16F0}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, + {0x170E, 0x1711}, {0x1712, 0x1714}, {0x1720, 0x1731}, + {0x1732, 0x1734}, {0x1735, 0x1736}, {0x1740, 0x1751}, + {0x1752, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17B3}, {0x17B4, 0x17B5}, + {0x17B6, 0x17B6}, {0x17B7, 0x17BD}, {0x17BE, 0x17C5}, + {0x17C6, 
0x17C6}, {0x17C7, 0x17C8}, {0x17C9, 0x17D3}, + {0x17D4, 0x17D6}, {0x17D7, 0x17D7}, {0x17D8, 0x17DA}, + {0x17DB, 0x17DB}, {0x17DC, 0x17DC}, {0x17DD, 0x17DD}, + {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x1805}, + {0x1806, 0x1806}, {0x1807, 0x180A}, {0x180B, 0x180D}, + {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1842}, + {0x1843, 0x1843}, {0x1844, 0x1877}, {0x1880, 0x1884}, + {0x1885, 0x1886}, {0x1887, 0x18A8}, {0x18A9, 0x18A9}, + {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, + {0x1920, 0x1922}, {0x1923, 0x1926}, {0x1927, 0x1928}, + {0x1929, 0x192B}, {0x1930, 0x1931}, {0x1932, 0x1932}, + {0x1933, 0x1938}, {0x1939, 0x193B}, {0x1940, 0x1940}, + {0x1944, 0x1945}, {0x1946, 0x194F}, {0x1950, 0x196D}, + {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, + {0x19D0, 0x19D9}, {0x19DA, 0x19DA}, {0x19DE, 0x19DF}, + {0x19E0, 0x19FF}, {0x1A00, 0x1A16}, {0x1A17, 0x1A18}, + {0x1A19, 0x1A1A}, {0x1A1B, 0x1A1B}, {0x1A1E, 0x1A1F}, + {0x1A20, 0x1A54}, {0x1A55, 0x1A55}, {0x1A56, 0x1A56}, + {0x1A57, 0x1A57}, {0x1A58, 0x1A5E}, {0x1A60, 0x1A60}, + {0x1A61, 0x1A61}, {0x1A62, 0x1A62}, {0x1A63, 0x1A64}, + {0x1A65, 0x1A6C}, {0x1A6D, 0x1A72}, {0x1A73, 0x1A7C}, + {0x1A7F, 0x1A7F}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, + {0x1AA0, 0x1AA6}, {0x1AA7, 0x1AA7}, {0x1AA8, 0x1AAD}, + {0x1AB0, 0x1ABD}, {0x1ABE, 0x1ABE}, {0x1B00, 0x1B03}, + {0x1B04, 0x1B04}, {0x1B05, 0x1B33}, {0x1B34, 0x1B34}, + {0x1B35, 0x1B35}, {0x1B36, 0x1B3A}, {0x1B3B, 0x1B3B}, + {0x1B3C, 0x1B3C}, {0x1B3D, 0x1B41}, {0x1B42, 0x1B42}, + {0x1B43, 0x1B44}, {0x1B45, 0x1B4B}, {0x1B50, 0x1B59}, + {0x1B5A, 0x1B60}, {0x1B61, 0x1B6A}, {0x1B6B, 0x1B73}, + {0x1B74, 0x1B7C}, {0x1B80, 0x1B81}, {0x1B82, 0x1B82}, + {0x1B83, 0x1BA0}, {0x1BA1, 0x1BA1}, {0x1BA2, 0x1BA5}, + {0x1BA6, 0x1BA7}, {0x1BA8, 0x1BA9}, {0x1BAA, 0x1BAA}, + {0x1BAB, 0x1BAD}, {0x1BAE, 0x1BAF}, {0x1BB0, 0x1BB9}, + {0x1BBA, 0x1BBF}, {0x1BC0, 0x1BE5}, {0x1BE6, 0x1BE6}, + {0x1BE7, 0x1BE7}, {0x1BE8, 0x1BE9}, {0x1BEA, 0x1BEC}, + {0x1BED, 0x1BED}, {0x1BEE, 0x1BEE}, {0x1BEF, 
0x1BF1}, + {0x1BF2, 0x1BF3}, {0x1BFC, 0x1BFF}, {0x1C00, 0x1C23}, + {0x1C24, 0x1C2B}, {0x1C2C, 0x1C33}, {0x1C34, 0x1C35}, + {0x1C36, 0x1C37}, {0x1C3B, 0x1C3F}, {0x1C40, 0x1C49}, + {0x1C4D, 0x1C4F}, {0x1C50, 0x1C59}, {0x1C5A, 0x1C77}, + {0x1C78, 0x1C7D}, {0x1C7E, 0x1C7F}, {0x1C80, 0x1C88}, + {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CD2}, {0x1CD3, 0x1CD3}, + {0x1CD4, 0x1CE0}, {0x1CE1, 0x1CE1}, {0x1CE2, 0x1CE8}, + {0x1CE9, 0x1CEC}, {0x1CED, 0x1CED}, {0x1CEE, 0x1CF1}, + {0x1CF2, 0x1CF3}, {0x1CF4, 0x1CF4}, {0x1CF5, 0x1CF6}, + {0x1CF8, 0x1CF9}, {0x1D00, 0x1D2B}, {0x1D2C, 0x1D6A}, + {0x1D6B, 0x1D77}, {0x1D78, 0x1D78}, {0x1D79, 0x1D7F}, + {0x1D80, 0x1D9A}, {0x1D9B, 0x1DBF}, {0x1DC0, 0x1DF5}, + {0x1DFB, 0x1DFF}, {0x1E00, 0x1EFF}, {0x1F00, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FBC}, {0x1FBD, 0x1FBD}, {0x1FBE, 0x1FBE}, + {0x1FBF, 0x1FC1}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC}, + {0x1FCD, 0x1FCF}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FDF}, {0x1FE0, 0x1FEC}, {0x1FED, 0x1FEF}, + {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x1FFD, 0x1FFE}, + {0x2000, 0x200A}, {0x200B, 0x200F}, {0x2011, 0x2012}, + {0x2017, 0x2017}, {0x201A, 0x201A}, {0x201B, 0x201B}, + {0x201E, 0x201E}, {0x201F, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x2028}, {0x2029, 0x2029}, {0x202A, 0x202E}, + {0x202F, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x2038}, {0x2039, 0x2039}, {0x203A, 0x203A}, + {0x203C, 0x203D}, {0x203F, 0x2040}, {0x2041, 0x2043}, + {0x2044, 0x2044}, {0x2045, 0x2045}, {0x2046, 0x2046}, + {0x2047, 0x2051}, {0x2052, 0x2052}, {0x2053, 0x2053}, + {0x2054, 0x2054}, {0x2055, 0x205E}, {0x205F, 0x205F}, + {0x2060, 0x2064}, {0x2066, 0x206F}, {0x2070, 0x2070}, + {0x2071, 0x2071}, {0x2075, 0x2079}, {0x207A, 0x207C}, + {0x207D, 0x207D}, {0x207E, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x2089}, {0x208A, 0x208C}, {0x208D, 0x208D}, + {0x208E, 0x208E}, {0x2090, 
0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20DC}, + {0x20DD, 0x20E0}, {0x20E1, 0x20E1}, {0x20E2, 0x20E4}, + {0x20E5, 0x20F0}, {0x2100, 0x2101}, {0x2102, 0x2102}, + {0x2104, 0x2104}, {0x2106, 0x2106}, {0x2107, 0x2107}, + {0x2108, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2114}, + {0x2115, 0x2115}, {0x2117, 0x2117}, {0x2118, 0x2118}, + {0x2119, 0x211D}, {0x211E, 0x2120}, {0x2123, 0x2123}, + {0x2124, 0x2124}, {0x2125, 0x2125}, {0x2127, 0x2127}, + {0x2128, 0x2128}, {0x2129, 0x2129}, {0x212A, 0x212A}, + {0x212C, 0x212D}, {0x212E, 0x212E}, {0x212F, 0x2134}, + {0x2135, 0x2138}, {0x2139, 0x2139}, {0x213A, 0x213B}, + {0x213C, 0x213F}, {0x2140, 0x2144}, {0x2145, 0x2149}, + {0x214A, 0x214A}, {0x214B, 0x214B}, {0x214C, 0x214D}, + {0x214E, 0x214E}, {0x214F, 0x214F}, {0x2150, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2182}, {0x2183, 0x2184}, {0x2185, 0x2188}, + {0x218A, 0x218B}, {0x219A, 0x219B}, {0x219C, 0x219F}, + {0x21A0, 0x21A0}, {0x21A1, 0x21A2}, {0x21A3, 0x21A3}, + {0x21A4, 0x21A5}, {0x21A6, 0x21A6}, {0x21A7, 0x21AD}, + {0x21AE, 0x21AE}, {0x21AF, 0x21B7}, {0x21BA, 0x21CD}, + {0x21CE, 0x21CF}, {0x21D0, 0x21D1}, {0x21D3, 0x21D3}, + {0x21D5, 0x21E6}, {0x21E8, 0x21F3}, {0x21F4, 0x21FF}, + {0x2201, 0x2201}, {0x2204, 0x2206}, {0x2209, 0x220A}, + {0x220C, 0x220E}, {0x2210, 0x2210}, {0x2212, 0x2214}, + {0x2216, 0x2219}, {0x221B, 0x221C}, {0x2221, 0x2222}, + {0x2224, 0x2224}, {0x2226, 0x2226}, {0x222D, 0x222D}, + {0x222F, 0x2233}, {0x2238, 0x223B}, {0x223E, 0x2247}, + {0x2249, 0x224B}, {0x224D, 0x2251}, {0x2253, 0x225F}, + {0x2262, 0x2263}, {0x2268, 0x2269}, {0x226C, 0x226D}, + {0x2270, 0x2281}, {0x2284, 0x2285}, {0x2288, 0x2294}, + {0x2296, 0x2298}, {0x229A, 0x22A4}, {0x22A6, 0x22BE}, + {0x22C0, 0x22FF}, {0x2300, 0x2307}, {0x2308, 0x2308}, + {0x2309, 0x2309}, {0x230A, 0x230A}, {0x230B, 0x230B}, + {0x230C, 0x2311}, {0x2313, 0x2319}, {0x231C, 0x231F}, + {0x2320, 0x2321}, {0x2322, 0x2328}, {0x232B, 0x237B}, + {0x237C, 
0x237C}, {0x237D, 0x239A}, {0x239B, 0x23B3}, + {0x23B4, 0x23DB}, {0x23DC, 0x23E1}, {0x23E2, 0x23E8}, + {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, + {0x2400, 0x2426}, {0x2440, 0x244A}, {0x24EA, 0x24EA}, + {0x254C, 0x254F}, {0x2574, 0x257F}, {0x2590, 0x2591}, + {0x2596, 0x259F}, {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, + {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, + {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, + {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, {0x25F0, 0x25F7}, + {0x25F8, 0x25FC}, {0x25FF, 0x25FF}, {0x2600, 0x2604}, + {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, + {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, + {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, + {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, + {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692}, + {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, + {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, + {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, + {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, + {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, + {0x2758, 0x2767}, {0x2768, 0x2768}, {0x2769, 0x2769}, + {0x276A, 0x276A}, {0x276B, 0x276B}, {0x276C, 0x276C}, + {0x276D, 0x276D}, {0x276E, 0x276E}, {0x276F, 0x276F}, + {0x2770, 0x2770}, {0x2771, 0x2771}, {0x2772, 0x2772}, + {0x2773, 0x2773}, {0x2774, 0x2774}, {0x2775, 0x2775}, + {0x2780, 0x2793}, {0x2794, 0x2794}, {0x2798, 0x27AF}, + {0x27B1, 0x27BE}, {0x27C0, 0x27C4}, {0x27C5, 0x27C5}, + {0x27C6, 0x27C6}, {0x27C7, 0x27E5}, {0x27EE, 0x27EE}, + {0x27EF, 0x27EF}, {0x27F0, 0x27FF}, {0x2800, 0x28FF}, + {0x2900, 0x297F}, {0x2980, 0x2982}, {0x2983, 0x2983}, + {0x2984, 0x2984}, {0x2987, 0x2987}, {0x2988, 0x2988}, + {0x2989, 0x2989}, {0x298A, 0x298A}, {0x298B, 0x298B}, + {0x298C, 0x298C}, {0x298D, 0x298D}, {0x298E, 0x298E}, + {0x298F, 0x298F}, {0x2990, 0x2990}, {0x2991, 0x2991}, + {0x2992, 0x2992}, {0x2993, 0x2993}, {0x2994, 0x2994}, + {0x2995, 0x2995}, {0x2996, 0x2996}, {0x2997, 
0x2997}, + {0x2998, 0x2998}, {0x2999, 0x29D7}, {0x29D8, 0x29D8}, + {0x29D9, 0x29D9}, {0x29DA, 0x29DA}, {0x29DB, 0x29DB}, + {0x29DC, 0x29FB}, {0x29FC, 0x29FC}, {0x29FD, 0x29FD}, + {0x29FE, 0x29FF}, {0x2A00, 0x2AFF}, {0x2B00, 0x2B1A}, + {0x2B1D, 0x2B2F}, {0x2B30, 0x2B44}, {0x2B45, 0x2B46}, + {0x2B47, 0x2B4C}, {0x2B4D, 0x2B4F}, {0x2B51, 0x2B54}, + {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, + {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, + {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2C7B}, + {0x2C7C, 0x2C7D}, {0x2C7E, 0x2C7F}, {0x2C80, 0x2CE4}, + {0x2CE5, 0x2CEA}, {0x2CEB, 0x2CEE}, {0x2CEF, 0x2CF1}, + {0x2CF2, 0x2CF3}, {0x2CF9, 0x2CFC}, {0x2CFD, 0x2CFD}, + {0x2CFE, 0x2CFF}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, + {0x2D70, 0x2D70}, {0x2D7F, 0x2D7F}, {0x2D80, 0x2D96}, + {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, + {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, + {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2DFF}, + {0x2E00, 0x2E01}, {0x2E02, 0x2E02}, {0x2E03, 0x2E03}, + {0x2E04, 0x2E04}, {0x2E05, 0x2E05}, {0x2E06, 0x2E08}, + {0x2E09, 0x2E09}, {0x2E0A, 0x2E0A}, {0x2E0B, 0x2E0B}, + {0x2E0C, 0x2E0C}, {0x2E0D, 0x2E0D}, {0x2E0E, 0x2E16}, + {0x2E17, 0x2E17}, {0x2E18, 0x2E19}, {0x2E1A, 0x2E1A}, + {0x2E1B, 0x2E1B}, {0x2E1C, 0x2E1C}, {0x2E1D, 0x2E1D}, + {0x2E1E, 0x2E1F}, {0x2E20, 0x2E20}, {0x2E21, 0x2E21}, + {0x2E22, 0x2E22}, {0x2E23, 0x2E23}, {0x2E24, 0x2E24}, + {0x2E25, 0x2E25}, {0x2E26, 0x2E26}, {0x2E27, 0x2E27}, + {0x2E28, 0x2E28}, {0x2E29, 0x2E29}, {0x2E2A, 0x2E2E}, + {0x2E2F, 0x2E2F}, {0x2E30, 0x2E39}, {0x2E3A, 0x2E3B}, + {0x2E3C, 0x2E3F}, {0x2E40, 0x2E40}, {0x2E41, 0x2E41}, + {0x2E42, 0x2E42}, {0x2E43, 0x2E44}, {0x303F, 0x303F}, + {0x4DC0, 0x4DFF}, {0xA4D0, 0xA4F7}, {0xA4F8, 0xA4FD}, + {0xA4FE, 0xA4FF}, {0xA500, 0xA60B}, {0xA60C, 0xA60C}, + {0xA60D, 0xA60F}, {0xA610, 0xA61F}, {0xA620, 0xA629}, + {0xA62A, 0xA62B}, {0xA640, 0xA66D}, {0xA66E, 0xA66E}, + {0xA66F, 0xA66F}, {0xA670, 
0xA672}, {0xA673, 0xA673}, + {0xA674, 0xA67D}, {0xA67E, 0xA67E}, {0xA67F, 0xA67F}, + {0xA680, 0xA69B}, {0xA69C, 0xA69D}, {0xA69E, 0xA69F}, + {0xA6A0, 0xA6E5}, {0xA6E6, 0xA6EF}, {0xA6F0, 0xA6F1}, + {0xA6F2, 0xA6F7}, {0xA700, 0xA716}, {0xA717, 0xA71F}, + {0xA720, 0xA721}, {0xA722, 0xA76F}, {0xA770, 0xA770}, + {0xA771, 0xA787}, {0xA788, 0xA788}, {0xA789, 0xA78A}, + {0xA78B, 0xA78E}, {0xA78F, 0xA78F}, {0xA790, 0xA7AE}, + {0xA7B0, 0xA7B7}, {0xA7F7, 0xA7F7}, {0xA7F8, 0xA7F9}, + {0xA7FA, 0xA7FA}, {0xA7FB, 0xA7FF}, {0xA800, 0xA801}, + {0xA802, 0xA802}, {0xA803, 0xA805}, {0xA806, 0xA806}, + {0xA807, 0xA80A}, {0xA80B, 0xA80B}, {0xA80C, 0xA822}, + {0xA823, 0xA824}, {0xA825, 0xA826}, {0xA827, 0xA827}, + {0xA828, 0xA82B}, {0xA830, 0xA835}, {0xA836, 0xA837}, + {0xA838, 0xA838}, {0xA839, 0xA839}, {0xA840, 0xA873}, + {0xA874, 0xA877}, {0xA880, 0xA881}, {0xA882, 0xA8B3}, + {0xA8B4, 0xA8C3}, {0xA8C4, 0xA8C5}, {0xA8CE, 0xA8CF}, + {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8F2, 0xA8F7}, + {0xA8F8, 0xA8FA}, {0xA8FB, 0xA8FB}, {0xA8FC, 0xA8FC}, + {0xA8FD, 0xA8FD}, {0xA900, 0xA909}, {0xA90A, 0xA925}, + {0xA926, 0xA92D}, {0xA92E, 0xA92F}, {0xA930, 0xA946}, + {0xA947, 0xA951}, {0xA952, 0xA953}, {0xA95F, 0xA95F}, + {0xA980, 0xA982}, {0xA983, 0xA983}, {0xA984, 0xA9B2}, + {0xA9B3, 0xA9B3}, {0xA9B4, 0xA9B5}, {0xA9B6, 0xA9B9}, + {0xA9BA, 0xA9BB}, {0xA9BC, 0xA9BC}, {0xA9BD, 0xA9C0}, + {0xA9C1, 0xA9CD}, {0xA9CF, 0xA9CF}, {0xA9D0, 0xA9D9}, + {0xA9DE, 0xA9DF}, {0xA9E0, 0xA9E4}, {0xA9E5, 0xA9E5}, + {0xA9E6, 0xA9E6}, {0xA9E7, 0xA9EF}, {0xA9F0, 0xA9F9}, + {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA29, 0xAA2E}, + {0xAA2F, 0xAA30}, {0xAA31, 0xAA32}, {0xAA33, 0xAA34}, + {0xAA35, 0xAA36}, {0xAA40, 0xAA42}, {0xAA43, 0xAA43}, + {0xAA44, 0xAA4B}, {0xAA4C, 0xAA4C}, {0xAA4D, 0xAA4D}, + {0xAA50, 0xAA59}, {0xAA5C, 0xAA5F}, {0xAA60, 0xAA6F}, + {0xAA70, 0xAA70}, {0xAA71, 0xAA76}, {0xAA77, 0xAA79}, + {0xAA7A, 0xAA7A}, {0xAA7B, 0xAA7B}, {0xAA7C, 0xAA7C}, + {0xAA7D, 0xAA7D}, {0xAA7E, 0xAA7F}, {0xAA80, 0xAAAF}, + {0xAAB0, 
0xAAB0}, {0xAAB1, 0xAAB1}, {0xAAB2, 0xAAB4}, + {0xAAB5, 0xAAB6}, {0xAAB7, 0xAAB8}, {0xAAB9, 0xAABD}, + {0xAABE, 0xAABF}, {0xAAC0, 0xAAC0}, {0xAAC1, 0xAAC1}, + {0xAAC2, 0xAAC2}, {0xAADB, 0xAADC}, {0xAADD, 0xAADD}, + {0xAADE, 0xAADF}, {0xAAE0, 0xAAEA}, {0xAAEB, 0xAAEB}, + {0xAAEC, 0xAAED}, {0xAAEE, 0xAAEF}, {0xAAF0, 0xAAF1}, + {0xAAF2, 0xAAF2}, {0xAAF3, 0xAAF4}, {0xAAF5, 0xAAF5}, + {0xAAF6, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB5A}, {0xAB5B, 0xAB5B}, {0xAB5C, 0xAB5F}, + {0xAB60, 0xAB65}, {0xAB70, 0xABBF}, {0xABC0, 0xABE2}, + {0xABE3, 0xABE4}, {0xABE5, 0xABE5}, {0xABE6, 0xABE7}, + {0xABE8, 0xABE8}, {0xABE9, 0xABEA}, {0xABEB, 0xABEB}, + {0xABEC, 0xABEC}, {0xABED, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDB7F}, + {0xDB80, 0xDBFF}, {0xDC00, 0xDFFF}, {0xFB00, 0xFB06}, + {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1E, 0xFB1E}, + {0xFB1F, 0xFB28}, {0xFB29, 0xFB29}, {0xFB2A, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFB4F}, {0xFB50, 0xFBB1}, + {0xFBB2, 0xFBC1}, {0xFBD3, 0xFD3D}, {0xFD3E, 0xFD3E}, + {0xFD3F, 0xFD3F}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, + {0xFDF0, 0xFDFB}, {0xFDFC, 0xFDFC}, {0xFDFD, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFC, 0xFFFC}, + {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, + {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, + {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, + {0x10137, 0x1013F}, {0x10140, 0x10174}, {0x10175, 0x10178}, + {0x10179, 0x10189}, {0x1018A, 0x1018B}, {0x1018C, 0x1018E}, + {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, + {0x101FD, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, + {0x102E0, 0x102E0}, {0x102E1, 0x102FB}, {0x10300, 0x1031F}, + {0x10320, 0x10323}, {0x10330, 0x10340}, {0x10341, 0x10341}, + {0x10342, 0x10349}, {0x1034A, 0x1034A}, {0x10350, 
0x10375}, + {0x10376, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x1039F}, + {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D0, 0x103D0}, + {0x103D1, 0x103D5}, {0x10400, 0x1044F}, {0x10450, 0x1047F}, + {0x10480, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x1083F}, {0x10840, 0x10855}, {0x10857, 0x10857}, + {0x10858, 0x1085F}, {0x10860, 0x10876}, {0x10877, 0x10878}, + {0x10879, 0x1087F}, {0x10880, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x108FF}, + {0x10900, 0x10915}, {0x10916, 0x1091B}, {0x1091F, 0x1091F}, + {0x10920, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x1099F}, + {0x109A0, 0x109B7}, {0x109BC, 0x109BD}, {0x109BE, 0x109BF}, + {0x109C0, 0x109CF}, {0x109D2, 0x109FF}, {0x10A00, 0x10A00}, + {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, + {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x10A40, 0x10A47}, + {0x10A50, 0x10A58}, {0x10A60, 0x10A7C}, {0x10A7D, 0x10A7E}, + {0x10A7F, 0x10A7F}, {0x10A80, 0x10A9C}, {0x10A9D, 0x10A9F}, + {0x10AC0, 0x10AC7}, {0x10AC8, 0x10AC8}, {0x10AC9, 0x10AE4}, + {0x10AE5, 0x10AE6}, {0x10AEB, 0x10AEF}, {0x10AF0, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B3F}, {0x10B40, 0x10B55}, + {0x10B58, 0x10B5F}, {0x10B60, 0x10B72}, {0x10B78, 0x10B7F}, + {0x10B80, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x11000}, + {0x11001, 0x11001}, {0x11002, 0x11002}, {0x11003, 0x11037}, + {0x11038, 0x11046}, {0x11047, 0x1104D}, {0x11052, 0x11065}, + {0x11066, 0x1106F}, {0x1107F, 0x1107F}, {0x11080, 0x11081}, + {0x11082, 0x11082}, {0x11083, 0x110AF}, {0x110B0, 0x110B2}, + 
{0x110B3, 0x110B6}, {0x110B7, 0x110B8}, {0x110B9, 0x110BA}, + {0x110BB, 0x110BC}, {0x110BD, 0x110BD}, {0x110BE, 0x110C1}, + {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, {0x11100, 0x11102}, + {0x11103, 0x11126}, {0x11127, 0x1112B}, {0x1112C, 0x1112C}, + {0x1112D, 0x11134}, {0x11136, 0x1113F}, {0x11140, 0x11143}, + {0x11150, 0x11172}, {0x11173, 0x11173}, {0x11174, 0x11175}, + {0x11176, 0x11176}, {0x11180, 0x11181}, {0x11182, 0x11182}, + {0x11183, 0x111B2}, {0x111B3, 0x111B5}, {0x111B6, 0x111BE}, + {0x111BF, 0x111C0}, {0x111C1, 0x111C4}, {0x111C5, 0x111C9}, + {0x111CA, 0x111CC}, {0x111CD, 0x111CD}, {0x111D0, 0x111D9}, + {0x111DA, 0x111DA}, {0x111DB, 0x111DB}, {0x111DC, 0x111DC}, + {0x111DD, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, + {0x11213, 0x1122B}, {0x1122C, 0x1122E}, {0x1122F, 0x11231}, + {0x11232, 0x11233}, {0x11234, 0x11234}, {0x11235, 0x11235}, + {0x11236, 0x11237}, {0x11238, 0x1123D}, {0x1123E, 0x1123E}, + {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, + {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112A9, 0x112A9}, + {0x112B0, 0x112DE}, {0x112DF, 0x112DF}, {0x112E0, 0x112E2}, + {0x112E3, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11301}, + {0x11302, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133C, 0x1133C}, {0x1133D, 0x1133D}, + {0x1133E, 0x1133F}, {0x11340, 0x11340}, {0x11341, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, + {0x11357, 0x11357}, {0x1135D, 0x11361}, {0x11362, 0x11363}, + {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x11434}, + {0x11435, 0x11437}, {0x11438, 0x1143F}, {0x11440, 0x11441}, + {0x11442, 0x11444}, {0x11445, 0x11445}, {0x11446, 0x11446}, + {0x11447, 0x1144A}, {0x1144B, 0x1144F}, {0x11450, 0x11459}, + {0x1145B, 0x1145B}, {0x1145D, 0x1145D}, {0x11480, 0x114AF}, + {0x114B0, 0x114B2}, {0x114B3, 0x114B8}, {0x114B9, 0x114B9}, + {0x114BA, 0x114BA}, {0x114BB, 0x114BE}, {0x114BF, 0x114C0}, + {0x114C1, 
0x114C1}, {0x114C2, 0x114C3}, {0x114C4, 0x114C5}, + {0x114C6, 0x114C6}, {0x114C7, 0x114C7}, {0x114D0, 0x114D9}, + {0x11580, 0x115AE}, {0x115AF, 0x115B1}, {0x115B2, 0x115B5}, + {0x115B8, 0x115BB}, {0x115BC, 0x115BD}, {0x115BE, 0x115BE}, + {0x115BF, 0x115C0}, {0x115C1, 0x115D7}, {0x115D8, 0x115DB}, + {0x115DC, 0x115DD}, {0x11600, 0x1162F}, {0x11630, 0x11632}, + {0x11633, 0x1163A}, {0x1163B, 0x1163C}, {0x1163D, 0x1163D}, + {0x1163E, 0x1163E}, {0x1163F, 0x11640}, {0x11641, 0x11643}, + {0x11644, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116AA}, {0x116AB, 0x116AB}, {0x116AC, 0x116AC}, + {0x116AD, 0x116AD}, {0x116AE, 0x116AF}, {0x116B0, 0x116B5}, + {0x116B6, 0x116B6}, {0x116B7, 0x116B7}, {0x116C0, 0x116C9}, + {0x11700, 0x11719}, {0x1171D, 0x1171F}, {0x11720, 0x11721}, + {0x11722, 0x11725}, {0x11726, 0x11726}, {0x11727, 0x1172B}, + {0x11730, 0x11739}, {0x1173A, 0x1173B}, {0x1173C, 0x1173E}, + {0x1173F, 0x1173F}, {0x118A0, 0x118DF}, {0x118E0, 0x118E9}, + {0x118EA, 0x118F2}, {0x118FF, 0x118FF}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C2F, 0x11C2F}, + {0x11C30, 0x11C36}, {0x11C38, 0x11C3D}, {0x11C3E, 0x11C3E}, + {0x11C3F, 0x11C3F}, {0x11C40, 0x11C40}, {0x11C41, 0x11C45}, + {0x11C50, 0x11C59}, {0x11C5A, 0x11C6C}, {0x11C70, 0x11C71}, + {0x11C72, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CA9}, + {0x11CAA, 0x11CB0}, {0x11CB1, 0x11CB1}, {0x11CB2, 0x11CB3}, + {0x11CB4, 0x11CB4}, {0x11CB5, 0x11CB6}, {0x12000, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, + {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, + {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF4}, {0x16AF5, 0x16AF5}, + {0x16B00, 0x16B2F}, {0x16B30, 0x16B36}, {0x16B37, 0x16B3B}, + {0x16B3C, 0x16B3F}, {0x16B40, 0x16B43}, {0x16B44, 0x16B44}, + {0x16B45, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, + {0x16F50, 0x16F50}, 
{0x16F51, 0x16F7E}, {0x16F8F, 0x16F92}, + {0x16F93, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, + {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BC9C}, + {0x1BC9D, 0x1BC9E}, {0x1BC9F, 0x1BC9F}, {0x1BCA0, 0x1BCA3}, + {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, + {0x1D165, 0x1D166}, {0x1D167, 0x1D169}, {0x1D16A, 0x1D16C}, + {0x1D16D, 0x1D172}, {0x1D173, 0x1D17A}, {0x1D17B, 0x1D182}, + {0x1D183, 0x1D184}, {0x1D185, 0x1D18B}, {0x1D18C, 0x1D1A9}, + {0x1D1AA, 0x1D1AD}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, + {0x1D242, 0x1D244}, {0x1D245, 0x1D245}, {0x1D300, 0x1D356}, + {0x1D360, 0x1D371}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, + {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, + {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, + {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, + {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, + {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, + {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, + {0x1D6C1, 0x1D6C1}, {0x1D6C2, 0x1D6DA}, {0x1D6DB, 0x1D6DB}, + {0x1D6DC, 0x1D6FA}, {0x1D6FB, 0x1D6FB}, {0x1D6FC, 0x1D714}, + {0x1D715, 0x1D715}, {0x1D716, 0x1D734}, {0x1D735, 0x1D735}, + {0x1D736, 0x1D74E}, {0x1D74F, 0x1D74F}, {0x1D750, 0x1D76E}, + {0x1D76F, 0x1D76F}, {0x1D770, 0x1D788}, {0x1D789, 0x1D789}, + {0x1D78A, 0x1D7A8}, {0x1D7A9, 0x1D7A9}, {0x1D7AA, 0x1D7C2}, + {0x1D7C3, 0x1D7C3}, {0x1D7C4, 0x1D7CB}, {0x1D7CE, 0x1D7FF}, + {0x1D800, 0x1D9FF}, {0x1DA00, 0x1DA36}, {0x1DA37, 0x1DA3A}, + {0x1DA3B, 0x1DA6C}, {0x1DA6D, 0x1DA74}, {0x1DA75, 0x1DA75}, + {0x1DA76, 0x1DA83}, {0x1DA84, 0x1DA84}, {0x1DA85, 0x1DA86}, + {0x1DA87, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, + {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, + {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, + {0x1E8C7, 0x1E8CF}, {0x1E8D0, 0x1E8D6}, {0x1E900, 0x1E943}, + {0x1E944, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EE00, 0x1EE03}, {0x1EE05, 
0x1EE1F}, {0x1EE21, 0x1EE22}, + {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, + {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, + {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, + {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, + {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, + {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, + {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, + {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, + {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, + {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, + {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, + {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, + {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, + {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA}, + {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4}, + {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, + {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001}, + {0xE0020, 0xE007F}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{EastAsianWidth} +} + +// RuneWidth returns the number of cells in r. 
+// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + switch { + case r < 0 || r > 0x10FFFF || + inTables(r, nonprint, combining, notassigned): + return 0 + case (c.EastAsianWidth && IsAmbiguousWidth(r)) || + inTables(r, doublewidth, emoji): + return 2 + default: + return 1 + } +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. 
+// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return inTables(r, private, ambiguous) +} + +// IsNeutralWidth returns whether is neutral width or not. +func IsNeutralWidth(r rune) bool { + return inTable(r, neutral) +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 000000000..0ce32c5e7 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,8 @@ +// +build js + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. 
+ return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 000000000..c579e9a31 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,77 @@ +// +build !windows,!js + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_CTYPE") + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 000000000..0258876b9 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,25 @@ +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/vendor.json b/vendor/vendor.json index eda4727e4..e47378fdf 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -816,6 +816,12 @@ "path": "github.com/hashicorp/yamux", "revision": "df949784da9ed028ee76df44652e42d37a09d7e4" }, + { + "checksumSHA1": "28nznojcl6Ejtm6I1tKozgy0isg=", + "path": "github.com/jlaffaye/ftp", + "revision": "025815df64030c144b86e368fedf80ee195fe464", + "revisionTime": "2016-02-29T21:52:38Z" + }, { "checksumSHA1": "3/Bhy+ua/DCv2ElMD5GzOYSGN6g=", "comment": "0.2.2-2-gc01cf91", @@ -917,6 +923,12 @@ "path": "github.com/mattn/go-isatty", "revision": "56b76bdf51f7708750eac80fa38b952bb9f32639" }, + { + "checksumSHA1": "MNkKJyk2TazKMJYbal5wFHybpyA=", + "path": "github.com/mattn/go-runewidth", + "revision": "14207d285c6c197daabb5c9793d63e7af9ab2d50", + "revisionTime": "2017-02-01T02:35:40Z" + }, { "checksumSHA1": "UP+pXl+ic9y6qrpZA5MqDIAuGfw=", "path": "github.com/mitchellh/cli", From e940dc7e429834095abd53303fe2889ded272f71 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 1 Mar 2017 14:52:55 -0600 Subject: [PATCH 043/251] Fixed a config_test that should've failed but didn't because ftp:// uris work now. HeH! 
--- builder/virtualbox/ovf/config_test.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/builder/virtualbox/ovf/config_test.go b/builder/virtualbox/ovf/config_test.go index 5e67b242d..e9c536e54 100644 --- a/builder/virtualbox/ovf/config_test.go +++ b/builder/virtualbox/ovf/config_test.go @@ -76,17 +76,6 @@ func TestNewConfig_sourcePath(t *testing.T) { t.Fatalf("Nonexistant file should throw a validation error!") } - // Bad - c = testConfig(t) - c["source_path"] = "ftp://i/dont/exist" - _, warns, err = NewConfig(c) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should error") - } - // Good tf := getTempFile(t) defer os.Remove(tf.Name()) From d275bacb0f8e9ffa45787bd954742cc9ef62c115 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 23 Mar 2017 14:51:45 -0500 Subject: [PATCH 044/251] go fmt builder/vmware/iso/step_create_vmx.go to calm down Travis CI. --- builder/vmware/iso/step_create_vmx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 62546d212..3f3644e70 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -44,7 +44,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) // Convert the iso_path into a path relative to the .vmx file if possible - if relativeIsoPath,err := filepath.Rel(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { + if relativeIsoPath, err := filepath.Rel(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { isoPath = relativeIsoPath } From 4783b6508e69d34d65dc32db30e85842ba0d4985 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 23 Mar 2017 15:12:25 -0500 Subject: [PATCH 045/251] Fix common/download_test.go to avoid formatting the volume name to a hidden windows share when not on windows. 
--- common/download_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/common/download_test.go b/common/download_test.go index 81cc3244f..114ca8ec3 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -452,7 +452,10 @@ func TestFileUriTransforms(t *testing.T) { cwd = filepath.ToSlash(cwd) volume = filepath.VolumeName(cwd) share = volume - if share[len(share)-1] == ':' { + + // if a volume was found (on windows), replace the ':' from + // C: to C$ to convert it into a hidden windows share. + if len(share) > 1 && share[len(share)-1] == ':' { share = share[:len(share)-1] + "$" } cwd = cwd[len(volume):] From 5a4ce2165c22edeec96f1f02b259cbe3e45b30be Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 23 Mar 2017 15:29:38 -0500 Subject: [PATCH 046/251] Modified common/download_test.go to not test the smb:// uri on platforms other than windows. Added an immediate platform error to SMBDownloader.Download as opposed to letting .toPath return it (which would have left the structure partially initialized). 
--- common/download.go | 6 ++++++ common/download_test.go | 24 ++++++++++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/common/download.go b/common/download.go index 110cb1536..5d3621d1c 100644 --- a/common/download.go +++ b/common/download.go @@ -637,6 +637,12 @@ func (d *SMBDownloader) toPath(base string, uri url.URL) (string, error) { } func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { + + /* first we warn the world if we're not running windows */ + if runtime.GOOS != "windows" { + return fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS) + } + d.active = false /* convert the uri using the net/url module to a UNC path */ diff --git a/common/download_test.go b/common/download_test.go index 114ca8ec3..a62fceb96 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -483,15 +483,19 @@ func TestFileUriTransforms(t *testing.T) { t.Logf("TestFileUriTransforms : Result Path '%s'", res) } - // ...and finally the oddball windows native path - // smb://host/sharename/file -> \\host\sharename\file - testcase := host + "/" + share + "/" + cwd[1:] + "/%s" - uri := "smb://" + fmt.Sprintf(testcase, testpath) - t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) - res, err := SimulateFileUriDownload(t, uri) - if err != nil { - t.Errorf("Unable to transform uri '%s' into a path", uri) - return + // smb protocol depends on platform support which currently + // only exists on windows. 
+ if runtime.GOOS == "windows" { + // ...and finally the oddball windows native path + // smb://host/sharename/file -> \\host\sharename\file + testcase := host + "/" + share + "/" + cwd[1:] + "/%s" + uri := "smb://" + fmt.Sprintf(testcase, testpath) + t.Logf("TestFileUriTransforms : Trying Uri '%s'", uri) + res, err := SimulateFileUriDownload(t, uri) + if err != nil { + t.Errorf("Unable to transform uri '%s' into a path", uri) + return + } + t.Logf("TestFileUriTransforms : Result Path '%s'", res) } - t.Logf("TestFileUriTransforms : Result Path '%s'", res) } From c978e27f0f7e091092fbc863665370b2c9088b32 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 23 Mar 2017 15:36:40 -0500 Subject: [PATCH 047/251] grr. removed an assignment that was dead in common/download.go. --- common/download.go | 1 - 1 file changed, 1 deletion(-) diff --git a/common/download.go b/common/download.go index 5d3621d1c..188a8ca07 100644 --- a/common/download.go +++ b/common/download.go @@ -161,7 +161,6 @@ func (d *DownloadClient) Get() (string, error) { local, ok := d.downloader.(LocalDownloader) if !ok && !d.config.CopyFile { return "", fmt.Errorf("Not allowed to use uri scheme %s in no copy file mode : %T", u.Scheme, d.downloader) - d.config.CopyFile = true } // If we're copying the file, then just use the actual downloader From d85883582fb363ff817ffeb9418c9de53726f7a1 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 6 Dec 2017 16:29:00 -0600 Subject: [PATCH 048/251] Changed a critical error to a non-critical one when dealing with the strange .CopyFile flag in common/download.go. 
--- common/download.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/download.go b/common/download.go index 188a8ca07..46cc1213e 100644 --- a/common/download.go +++ b/common/download.go @@ -155,12 +155,12 @@ func (d *DownloadClient) Get() (string, error) { remote, ok := d.downloader.(RemoteDownloader) if !ok { - return "", fmt.Errorf("Unable to treat uri scheme %s as a Downloader : %T", u.Scheme, d.downloader) + return "", fmt.Errorf("Unable to treat uri scheme %s as a Downloader. : %T", u.Scheme, d.downloader) } local, ok := d.downloader.(LocalDownloader) if !ok && !d.config.CopyFile { - return "", fmt.Errorf("Not allowed to use uri scheme %s in no copy file mode : %T", u.Scheme, d.downloader) + d.config.CopyFile = true } // If we're copying the file, then just use the actual downloader From 69e5eec1ce5378b887d34afb24ece895a763b495 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 6 Dec 2017 17:57:03 -0600 Subject: [PATCH 049/251] Consolidated the progress-bar's format into common/step_download.go. Removed DownloadClient's PercentProgress callback since cheggaaa's progress-bar already does that. --- common/download.go | 124 ++++++++++++++++++++++------------------ common/step_download.go | 33 +++++++---- 2 files changed, 91 insertions(+), 66 deletions(-) diff --git a/common/download.go b/common/download.go index 46cc1213e..be53dfed4 100644 --- a/common/download.go +++ b/common/download.go @@ -83,19 +83,19 @@ func HashForType(t string) hash.Hash { // NewDownloadClient returns a new DownloadClient for the given // configuration. 
-func NewDownloadClient(c *DownloadConfig) *DownloadClient { +func NewDownloadClient(c *DownloadConfig, bar *pb.ProgressBar) *DownloadClient { const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */ if c.DownloaderMap == nil { + // Create downloader map c.DownloaderMap = map[string]Downloader{ - "file": &FileDownloader{bufferSize: nil}, - "ftp": &FTPDownloader{userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, - "http": &HTTPDownloader{userAgent: c.UserAgent}, - "https": &HTTPDownloader{userAgent: c.UserAgent}, - "smb": &SMBDownloader{bufferSize: nil}, + "file": &FileDownloader{progress: bar, bufferSize: nil}, + "ftp": &FTPDownloader{progress: bar, userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, + "http": &HTTPDownloader{progress: bar, userAgent: c.UserAgent}, + "https": &HTTPDownloader{progress: bar, userAgent: c.UserAgent}, + "smb": &SMBDownloader{progress: bar, bufferSize: nil}, } } - return &DownloadClient{config: c} } @@ -209,14 +209,6 @@ func (d *DownloadClient) Get() (string, error) { return finalPath, err } -// PercentProgress returns the download progress as a percentage. -func (d *DownloadClient) PercentProgress() int { - if d.downloader == nil { - return -1 - } - return int((float64(d.downloader.Progress()) / float64(d.downloader.Total())) * 100) -} - // VerifyChecksum tests that the path matches the checksum for the // download. func (d *DownloadClient) VerifyChecksum(path string) (bool, error) { @@ -239,9 +231,11 @@ func (d *DownloadClient) VerifyChecksum(path string) (bool, error) { // HTTPDownloader is an implementation of Downloader that downloads // files over HTTP. type HTTPDownloader struct { - progress uint64 + current uint64 total uint64 userAgent string + + progress *pb.ProgressBar } func (d *HTTPDownloader) Cancel() { @@ -261,7 +255,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } // Reset our progress - d.progress = 0 + d.current = 0 // Make the request. 
We first make a HEAD request so we can check // if the server supports range queries. If the server/URL doesn't @@ -290,7 +284,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { if _, err = dst.Seek(0, os.SEEK_END); err == nil { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) - d.progress = uint64(fi.Size()) + d.current = uint64(fi.Size()) } } } @@ -304,9 +298,11 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } - d.total = d.progress + uint64(resp.ContentLength) - progressBar := pb.New64(int64(d.Total())).Start() - progressBar.Set64(int64(d.Progress())) + d.total = d.current + uint64(resp.ContentLength) + + d.progress.Total = int64(d.total) + progressBar := d.progress.Start() + progressBar.Set64(int64(d.current)) var buffer [4096]byte for { @@ -315,8 +311,8 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) if _, werr := dst.Write(buffer[:n]); werr != nil { return werr @@ -326,12 +322,12 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { break } } - + progressBar.Finish() return nil } func (d *HTTPDownloader) Progress() uint64 { - return d.progress + return d.current } func (d *HTTPDownloader) Total() uint64 { @@ -344,13 +340,15 @@ type FTPDownloader struct { userInfo *url.Userinfo mtu uint - active bool - progress uint64 - total uint64 + active bool + current uint64 + total uint64 + + progress *pb.ProgressBar } func (d *FTPDownloader) Progress() uint64 { - return d.progress + return d.current } func (d *FTPDownloader) Total() uint64 { @@ -438,13 +436,15 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { } log.Printf("Found file : %s : %v bytes\n", entry.Name, entry.Size) - d.progress = 0 + d.current = 0 d.total = entry.Size - progressBar := pb.New64(int64(d.Total())).Start() + + 
d.progress.Total = int64(d.total) + progressBar := d.progress.Start() // download specified file d.active = true - reader, err := cli.RetrFrom(uri.Path, d.progress) + reader, err := cli.RetrFrom(uri.Path, d.current) if err != nil { return nil } @@ -458,8 +458,8 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { break } - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) } d.active = false e <- err @@ -467,10 +467,12 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { // spin until it's done err = <-errch + + progressBar.Finish() reader.Close() - if err == nil && d.progress != d.total { - err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.progress, d.total) + if err == nil && d.current != d.total { + err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.current, d.total) } // log out @@ -483,13 +485,15 @@ func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { type FileDownloader struct { bufferSize *uint - active bool - progress uint64 - total uint64 + active bool + current uint64 + total uint64 + + progress *pb.ProgressBar } func (d *FileDownloader) Progress() uint64 { - return d.progress + return d.current } func (d *FileDownloader) Total() uint64 { @@ -549,7 +553,7 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { } /* download the file using the operating system's facilities */ - d.progress = 0 + d.current = 0 d.active = true f, err := os.Open(realpath) @@ -564,7 +568,9 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { return err } d.total = uint64(fi.Size()) - progressBar := pb.New64(int64(d.Total())).Start() + + d.progress.Total = int64(d.total) + progressBar := d.progress.Start() // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { @@ -572,8 +578,8 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { n, err = io.Copy(dst, f) d.active = false - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) // use a goro in case someone else wants to enable cancel/resume } else { @@ -585,8 +591,8 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { break } - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) } d.active = false e <- err @@ -595,6 +601,7 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { // ...and we spin until it's done err = <-errch } + progressBar.Finish() f.Close() return err } @@ -604,13 +611,15 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { type SMBDownloader struct { bufferSize *uint - active bool - progress uint64 - total uint64 + active bool + current uint64 + total uint64 + + progress *pb.ProgressBar } func (d *SMBDownloader) Progress() uint64 { - return d.progress + return d.current } func (d *SMBDownloader) Total() uint64 { @@ -663,7 +672,7 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { } /* Open up the "\\"-prefixed path using the Windows filesystem */ - d.progress = 0 + d.current = 0 d.active = true f, err := os.Open(realpath) @@ -678,7 +687,9 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { return err } d.total = uint64(fi.Size()) - progressBar := pb.New64(int64(d.Total())).Start() + + d.progress.Total = int64(d.total) + progressBar := d.progress.Start() // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { @@ -686,8 +697,8 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { n, err = io.Copy(dst, f) d.active = false - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) // use a goro in case someone else wants to enable cancel/resume } else { @@ -699,8 +710,8 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { break } - d.progress += uint64(n) - progressBar.Set64(int64(d.Progress())) + d.current += uint64(n) + progressBar.Set64(int64(d.current)) } d.active = false e <- err @@ -709,6 +720,7 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { // ...and as usual we spin until it's done err = <-errch } + progressBar.Finish() f.Close() return err } diff --git a/common/step_download.go b/common/step_download.go index 765a07d23..2ce48cfff 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -9,6 +9,8 @@ import ( "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" + + "gopkg.in/cheggaaa/pb.v1" ) // StepDownload downloads a remote file using the download client within @@ -139,7 +141,23 @@ func (s *StepDownload) Cleanup(multistep.StateBag) {} func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag) (string, error, bool) { var path string ui := state.Get("ui").(packer.Ui) - download := NewDownloadClient(config) + + // design the appearance of the progress bar + bar := pb.New64(0) + bar.ShowPercent = true + bar.ShowCounters = true + bar.ShowSpeed = false + bar.ShowBar = true + bar.ShowTimeLeft = false + bar.ShowFinalTime = false + bar.SetUnits(pb.U_BYTES) + bar.Format("[=>-]") + bar.SetRefreshRate(1 * time.Second) + bar.SetWidth(25) + bar.Callback = ui.Message + + // create download client with config and progress bar + download := NewDownloadClient(config, bar) downloadCompleteCh := make(chan error, 1) go func() { @@ -148,24 +166,19 @@ func (s *StepDownload) 
download(config *DownloadConfig, state multistep.StateBag downloadCompleteCh <- err }() - progressTicker := time.NewTicker(5 * time.Second) - defer progressTicker.Stop() - for { select { case err := <-downloadCompleteCh: + bar.Finish() + if err != nil { return "", err, true } - return path, nil, true - case <-progressTicker.C: - progress := download.PercentProgress() - if progress >= 0 { - ui.Message(fmt.Sprintf("Download progress: %d%%", progress)) - } + case <-time.After(1 * time.Second): if _, ok := state.GetOk(multistep.StateCancelled); ok { + bar.Finish() ui.Say("Interrupt received. Cancelling download...") return "", nil, false } From 8c6efe336ce4ed5fea9fa816f9ce98d2f51a8dc4 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 7 Dec 2017 11:57:40 -0600 Subject: [PATCH 050/251] Added second argument for custom-formatted progress-bar to NewDownloadClient in common/download_test.go. This second parameter was added as a result of commit f0bd9018f3e318caafb1fe7d46e04c470e07c092 which lets you customize the progress-bar format. --- common/download.go | 7 ++++++- common/download_test.go | 20 ++++++++++---------- common/step_download.go | 2 +- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/common/download.go b/common/download.go index be53dfed4..4826d1dd1 100644 --- a/common/download.go +++ b/common/download.go @@ -86,8 +86,13 @@ func HashForType(t string) hash.Hash { func NewDownloadClient(c *DownloadConfig, bar *pb.ProgressBar) *DownloadClient { const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */ + // If no custom progress-bar was specified, then create a default one. + if bar == nil { + bar = pb.New64(0) + } + + // Create downloader map if it hasn't been specified already. 
if c.DownloaderMap == nil { - // Create downloader map c.DownloaderMap = map[string]Downloader{ "file": &FileDownloader{progress: bar, bufferSize: nil}, "ftp": &FTPDownloader{progress: bar, userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, diff --git a/common/download_test.go b/common/download_test.go index a62fceb96..50bdfa971 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -36,7 +36,7 @@ func TestDownloadClientVerifyChecksum(t *testing.T) { Checksum: checksum, } - d := NewDownloadClient(config) + d := NewDownloadClient(config, nil) result, err := d.VerifyChecksum(tf.Name()) if err != nil { t.Fatalf("Verify err: %s", err) @@ -59,7 +59,7 @@ func TestDownloadClient_basic(t *testing.T) { Url: ts.URL + "/basic.txt", TargetPath: tf.Name(), CopyFile: true, - }) + }, nil) path, err := client.Get() if err != nil { @@ -95,7 +95,7 @@ func TestDownloadClient_checksumBad(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }) + }, nil) if _, err := client.Get(); err == nil { t.Fatal("should error") } @@ -120,7 +120,7 @@ func TestDownloadClient_checksumGood(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }) + }, nil) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -151,7 +151,7 @@ func TestDownloadClient_checksumNoDownload(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }) + }, nil) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -190,7 +190,7 @@ func TestDownloadClient_resume(t *testing.T) { Url: ts.URL, TargetPath: tf.Name(), CopyFile: true, - }) + }, nil) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -250,7 +250,7 @@ func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { CopyFile: true, } - client := NewDownloadClient(config) + client := NewDownloadClient(config, nil) _, err = client.Get() if err != nil { t.Fatal(err) @@ -282,7 +282,7 @@ func 
TestDownloadClient_setsUserAgent(t *testing.T) { CopyFile: true, } - client := NewDownloadClient(config) + client := NewDownloadClient(config, nil) _, err = client.Get() if err != nil { t.Fatal(err) @@ -381,7 +381,7 @@ func TestDownloadFileUrl(t *testing.T) { CopyFile: false, } - client := NewDownloadClient(config) + client := NewDownloadClient(config, nil) // Verify that we fail to match the checksum _, err = client.Get() @@ -412,7 +412,7 @@ func SimulateFileUriDownload(t *testing.T, uri string) (string, error) { } // go go go - client := NewDownloadClient(config) + client := NewDownloadClient(config, nil) path, err := client.Get() // ignore any non-important checksum errors if it's not a unc path diff --git a/common/step_download.go b/common/step_download.go index 2ce48cfff..a4f249290 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -96,7 +96,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { } downloadConfigs[i] = config - if match, _ := NewDownloadClient(config).VerifyChecksum(config.TargetPath); match { + if match, _ := NewDownloadClient(config, nil).VerifyChecksum(config.TargetPath); match { ui.Message(fmt.Sprintf("Found already downloaded, initial checksum matched, no download needed: %s", url)) finalPath = config.TargetPath break From ab4490b9679765d77f275eb2ac958e5ac88c8d30 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 2 Jan 2018 17:26:25 -0600 Subject: [PATCH 051/251] Consolidated progress bar's appearance into the GetDefaultProgressBar() function. Updated dependency for cheggaaa's progress-bar from the gopkg.in location to the better maintained one on github.com. 
--- common/download.go | 19 ++- common/step_download.go | 27 ++-- .../pb.v1 => github.com/cheggaaa/pb}/LICENSE | 0 .../cheggaaa/pb}/README.md | 7 +- vendor/github.com/cheggaaa/pb/format.go | 118 ++++++++++++++++++ .../pb.v1 => github.com/cheggaaa/pb}/pb.go | 103 +++++++++------ .../cheggaaa/pb}/pb_win.go | 0 vendor/github.com/cheggaaa/pb/pb_x.go | 108 ++++++++++++++++ .../pb.v1 => github.com/cheggaaa/pb}/pool.go | 14 ++- .../cheggaaa/pb}/pool_win.go | 16 ++- .../cheggaaa/pb}/pool_x.go | 13 +- .../cheggaaa/pb}/reader.go | 0 .../cheggaaa/pb}/runecount.go | 2 +- .../cheggaaa/pb}/termios_bsd.go | 0 vendor/github.com/cheggaaa/pb/termios_sysv.go | 13 ++ vendor/gopkg.in/cheggaaa/pb.v1/format.go | 87 ------------- vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go | 8 -- vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go | 6 - vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go | 110 ---------------- vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go | 7 -- vendor/vendor.json | 12 +- 21 files changed, 373 insertions(+), 297 deletions(-) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/LICENSE (100%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/README.md (89%) create mode 100644 vendor/github.com/cheggaaa/pb/format.go rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/pb.go (82%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/pb_win.go (100%) create mode 100644 vendor/github.com/cheggaaa/pb/pb_x.go rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/pool.go (84%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/pool_win.go (64%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/pool_x.go (59%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/reader.go (100%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/runecount.go (90%) rename vendor/{gopkg.in/cheggaaa/pb.v1 => github.com/cheggaaa/pb}/termios_bsd.go (100%) create mode 100644 
vendor/github.com/cheggaaa/pb/termios_sysv.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/format.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go diff --git a/common/download.go b/common/download.go index 4826d1dd1..e5ebe305f 100644 --- a/common/download.go +++ b/common/download.go @@ -20,8 +20,8 @@ import ( // imports related to each Downloader implementation import ( + "github.com/cheggaaa/pb" "github.com/jlaffaye/ftp" - "gopkg.in/cheggaaa/pb.v1" "io" "net/http" "path/filepath" @@ -83,22 +83,17 @@ func HashForType(t string) hash.Hash { // NewDownloadClient returns a new DownloadClient for the given // configuration. -func NewDownloadClient(c *DownloadConfig, bar *pb.ProgressBar) *DownloadClient { +func NewDownloadClient(c *DownloadConfig, bar pb.ProgressBar) *DownloadClient { const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */ - // If no custom progress-bar was specified, then create a default one. - if bar == nil { - bar = pb.New64(0) - } - // Create downloader map if it hasn't been specified already. 
if c.DownloaderMap == nil { c.DownloaderMap = map[string]Downloader{ - "file": &FileDownloader{progress: bar, bufferSize: nil}, - "ftp": &FTPDownloader{progress: bar, userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, - "http": &HTTPDownloader{progress: bar, userAgent: c.UserAgent}, - "https": &HTTPDownloader{progress: bar, userAgent: c.UserAgent}, - "smb": &SMBDownloader{progress: bar, bufferSize: nil}, + "file": &FileDownloader{progress: &bar, bufferSize: nil}, + "ftp": &FTPDownloader{progress: &bar, userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, + "http": &HTTPDownloader{progress: &bar, userAgent: c.UserAgent}, + "https": &HTTPDownloader{progress: &bar, userAgent: c.UserAgent}, + "smb": &SMBDownloader{progress: &bar, bufferSize: nil}, } } return &DownloadClient{config: c} diff --git a/common/step_download.go b/common/step_download.go index a4f249290..f483eaba6 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -7,10 +7,9 @@ import ( "log" "time" + "github.com/cheggaaa/pb" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" - - "gopkg.in/cheggaaa/pb.v1" ) // StepDownload downloads a remote file using the download client within @@ -63,6 +62,10 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { ui.Say(fmt.Sprintf("Downloading or copying %s", s.Description)) + // Get a default-looking progress bar and connect it to the ui. 
+ bar := GetDefaultProgressBar() + bar.Callback = ui.Message + // First try to use any already downloaded file // If it fails, proceed to regualar download logic @@ -96,7 +99,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { } downloadConfigs[i] = config - if match, _ := NewDownloadClient(config, nil).VerifyChecksum(config.TargetPath); match { + if match, _ := NewDownloadClient(config, bar).VerifyChecksum(config.TargetPath); match { ui.Message(fmt.Sprintf("Found already downloaded, initial checksum matched, no download needed: %s", url)) finalPath = config.TargetPath break @@ -138,11 +141,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { func (s *StepDownload) Cleanup(multistep.StateBag) {} -func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag) (string, error, bool) { - var path string - ui := state.Get("ui").(packer.Ui) - - // design the appearance of the progress bar +func GetDefaultProgressBar() pb.ProgressBar { bar := pb.New64(0) bar.ShowPercent = true bar.ShowCounters = true @@ -154,9 +153,19 @@ func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag bar.Format("[=>-]") bar.SetRefreshRate(1 * time.Second) bar.SetWidth(25) + + return *bar +} + +func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag) (string, error, bool) { + var path string + ui := state.Get("ui").(packer.Ui) + + // Get a default looking progress bar and connect it to the ui. 
+ bar := GetDefaultProgressBar() bar.Callback = ui.Message - // create download client with config and progress bar + // Create download client with config and progress bar download := NewDownloadClient(config, bar) downloadCompleteCh := make(chan error, 1) diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE b/vendor/github.com/cheggaaa/pb/LICENSE similarity index 100% rename from vendor/gopkg.in/cheggaaa/pb.v1/LICENSE rename to vendor/github.com/cheggaaa/pb/LICENSE diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/github.com/cheggaaa/pb/README.md similarity index 89% rename from vendor/gopkg.in/cheggaaa/pb.v1/README.md rename to vendor/github.com/cheggaaa/pb/README.md index 31babb305..948545110 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/README.md +++ b/vendor/github.com/cheggaaa/pb/README.md @@ -1,7 +1,10 @@ # Terminal progress bar for Go -Simple progress bar for console programs. - +[![Join the chat at https://gitter.im/cheggaaa/pb](https://badges.gitter.im/cheggaaa/pb.svg)](https://gitter.im/cheggaaa/pb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +Simple progress bar for console programs. + +Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta) ## Installation diff --git a/vendor/github.com/cheggaaa/pb/format.go b/vendor/github.com/cheggaaa/pb/format.go new file mode 100644 index 000000000..0723561c2 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/format.go @@ -0,0 +1,118 @@ +package pb + +import ( + "fmt" + "time" +) + +type Units int + +const ( + // U_NO are default units, they represent a simple value and are not formatted at all. + U_NO Units = iota + // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...) + U_BYTES + // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...) 
+ U_BYTES_DEC + // U_DURATION units are formatted in a human readable way (3h14m15s) + U_DURATION +) + +const ( + KiB = 1024 + MiB = 1048576 + GiB = 1073741824 + TiB = 1099511627776 + + KB = 1e3 + MB = 1e6 + GB = 1e9 + TB = 1e12 +) + +func Format(i int64) *formatter { + return &formatter{n: i} +} + +type formatter struct { + n int64 + unit Units + width int + perSec bool +} + +func (f *formatter) To(unit Units) *formatter { + f.unit = unit + return f +} + +func (f *formatter) Width(width int) *formatter { + f.width = width + return f +} + +func (f *formatter) PerSec() *formatter { + f.perSec = true + return f +} + +func (f *formatter) String() (out string) { + switch f.unit { + case U_BYTES: + out = formatBytes(f.n) + case U_BYTES_DEC: + out = formatBytesDec(f.n) + case U_DURATION: + out = formatDuration(f.n) + default: + out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) + } + if f.perSec { + out += "/s" + } + return +} + +// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B +func formatBytes(i int64) (result string) { + switch { + case i >= TiB: + result = fmt.Sprintf("%.02f TiB", float64(i)/TiB) + case i >= GiB: + result = fmt.Sprintf("%.02f GiB", float64(i)/GiB) + case i >= MiB: + result = fmt.Sprintf("%.02f MiB", float64(i)/MiB) + case i >= KiB: + result = fmt.Sprintf("%.02f KiB", float64(i)/KiB) + default: + result = fmt.Sprintf("%d B", i) + } + return +} + +// Convert bytes to base-10 human readable string. 
Like 2 MB, 64.2 KB, 52 B +func formatBytesDec(i int64) (result string) { + switch { + case i >= TB: + result = fmt.Sprintf("%.02f TB", float64(i)/TB) + case i >= GB: + result = fmt.Sprintf("%.02f GB", float64(i)/GB) + case i >= MB: + result = fmt.Sprintf("%.02f MB", float64(i)/MB) + case i >= KB: + result = fmt.Sprintf("%.02f KB", float64(i)/KB) + default: + result = fmt.Sprintf("%d B", i) + } + return +} + +func formatDuration(n int64) (result string) { + d := time.Duration(n) + if d > time.Hour*24 { + result = fmt.Sprintf("%dd", d/24/time.Hour) + d -= (d / time.Hour / 24) * (time.Hour * 24) + } + result = fmt.Sprintf("%s%v", result, d) + return +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/github.com/cheggaaa/pb/pb.go similarity index 82% rename from vendor/gopkg.in/cheggaaa/pb.v1/pb.go rename to vendor/github.com/cheggaaa/pb/pb.go index f1f15bff3..19eb4d1a9 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go +++ b/vendor/github.com/cheggaaa/pb/pb.go @@ -13,7 +13,7 @@ import ( ) // Current version -const Version = "1.0.6" +const Version = "1.0.19" const ( // Default refresh rate - 200ms @@ -47,8 +47,6 @@ func New64(total int64) *ProgressBar { Units: U_NO, ManualUpdate: false, finish: make(chan struct{}), - currentValue: -1, - mu: new(sync.Mutex), } return pb.Format(FORMAT) } @@ -67,7 +65,8 @@ func StartNew(total int) *ProgressBar { type Callback func(out string) type ProgressBar struct { - current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + previous int64 Total int64 RefreshRate time.Duration @@ -91,13 +90,14 @@ type ProgressBar struct { finish chan struct{} isFinish bool - startTime time.Time - startValue int64 - currentValue int64 + startTime time.Time + startValue int64 + + changeTime time.Time prefix, postfix string - mu *sync.Mutex + mu sync.Mutex lastPrint string BarStart 
string @@ -112,7 +112,7 @@ type ProgressBar struct { // Start print func (pb *ProgressBar) Start() *ProgressBar { pb.startTime = time.Now() - pb.startValue = pb.current + pb.startValue = atomic.LoadInt64(&pb.current) if pb.Total == 0 { pb.ShowTimeLeft = false pb.ShowPercent = false @@ -173,7 +173,7 @@ func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { // Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter func (pb *ProgressBar) Format(format string) *ProgressBar { var formatEntries []string - if len(format) == 5 { + if utf8.RuneCountInString(format) == 5 { formatEntries = strings.Split(format, "") } else { formatEntries = strings.Split(format, "\x00") @@ -222,6 +222,8 @@ func (pb *ProgressBar) Finish() { pb.finishOnce.Do(func() { close(pb.finish) pb.write(atomic.LoadInt64(&pb.current)) + pb.mu.Lock() + defer pb.mu.Unlock() switch { case pb.Output != nil: fmt.Fprintln(pb.Output) @@ -232,6 +234,13 @@ func (pb *ProgressBar) Finish() { }) } +// IsFinished return boolean +func (pb *ProgressBar) IsFinished() bool { + pb.mu.Lock() + defer pb.mu.Unlock() + return pb.isFinish +} + // End print and write string 'str' func (pb *ProgressBar) FinishPrint(str string) { pb.Finish() @@ -290,8 +299,12 @@ func (pb *ProgressBar) write(current int64) { } // time left - fromStart := time.Now().Sub(pb.startTime) + pb.mu.Lock() currentFromStart := current - pb.startValue + fromStart := time.Now().Sub(pb.startTime) + lastChangeTime := pb.changeTime + fromChange := lastChangeTime.Sub(pb.startTime) + pb.mu.Unlock() select { case <-pb.finish: if pb.ShowFinalTime { @@ -301,17 +314,20 @@ func (pb *ProgressBar) write(current int64) { } default: if pb.ShowTimeLeft && currentFromStart > 0 { - perEntry := fromStart / time.Duration(currentFromStart) + perEntry := fromChange / time.Duration(currentFromStart) var left time.Duration if pb.Total > 0 { left = time.Duration(pb.Total-currentFromStart) * perEntry + left -= time.Since(lastChangeTime) left = (left / time.Second) 
* time.Second } else { left = time.Duration(currentFromStart) * perEntry left = (left / time.Second) * time.Second } - timeLeft := Format(int64(left)).To(U_DURATION).String() - timeLeftBox = fmt.Sprintf(" %s", timeLeft) + if left > 0 { + timeLeft := Format(int64(left)).To(U_DURATION).String() + timeLeftBox = fmt.Sprintf(" %s", timeLeft) + } } } @@ -332,24 +348,32 @@ func (pb *ProgressBar) write(current int64) { size := width - barWidth if size > 0 { if pb.Total > 0 { - curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) - emptCount := size - curCount + curSize := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) + emptySize := size - curSize barBox = pb.BarStart - if emptCount < 0 { - emptCount = 0 + if emptySize < 0 { + emptySize = 0 } - if curCount > size { - curCount = size + if curSize > size { + curSize = size } - if emptCount <= 0 { - barBox += strings.Repeat(pb.Current, curCount) - } else if curCount > 0 { - barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN + + cursorLen := escapeAwareRuneCountInString(pb.Current) + if emptySize <= 0 { + barBox += strings.Repeat(pb.Current, curSize/cursorLen) + } else if curSize > 0 { + cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN) + cursorRepetitions := (curSize - cursorEndLen) / cursorLen + barBox += strings.Repeat(pb.Current, cursorRepetitions) + barBox += pb.CurrentN } - barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd + + emptyLen := escapeAwareRuneCountInString(pb.Empty) + barBox += strings.Repeat(pb.Empty, emptySize/emptyLen) + barBox += pb.BarEnd } else { - barBox = pb.BarStart pos := size - int(current)%int(size) + barBox = pb.BarStart if pos-1 > 0 { barBox += strings.Repeat(pb.Empty, pos-1) } @@ -364,16 +388,17 @@ func (pb *ProgressBar) write(current int64) { // check len out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix - if escapeAwareRuneCountInString(out) < width { - end = 
strings.Repeat(" ", width-utf8.RuneCountInString(out)) + if cl := escapeAwareRuneCountInString(out); cl < width { + end = strings.Repeat(" ", width-cl) } // and print! pb.mu.Lock() pb.lastPrint = out + end + isFinish := pb.isFinish pb.mu.Unlock() switch { - case pb.isFinish: + case isFinish: return case pb.Output != nil: fmt.Fprint(pb.Output, "\r"+out+end) @@ -406,10 +431,14 @@ func (pb *ProgressBar) GetWidth() int { // Write the current state of the progressbar func (pb *ProgressBar) Update() { c := atomic.LoadInt64(&pb.current) - if pb.AlwaysUpdate || c != pb.currentValue { - pb.write(c) - pb.currentValue = c + p := atomic.LoadInt64(&pb.previous) + if p != c { + pb.mu.Lock() + pb.changeTime = time.Now() + pb.mu.Unlock() + atomic.StoreInt64(&pb.previous, c) } + pb.write(c) if pb.AutoStat { if c == 0 { pb.startTime = time.Now() @@ -420,7 +449,10 @@ func (pb *ProgressBar) Update() { } } +// String return the last bar print func (pb *ProgressBar) String() string { + pb.mu.Lock() + defer pb.mu.Unlock() return pb.lastPrint } @@ -435,10 +467,3 @@ func (pb *ProgressBar) refresher() { } } } - -type window struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/github.com/cheggaaa/pb/pb_win.go similarity index 100% rename from vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go rename to vendor/github.com/cheggaaa/pb/pb_win.go diff --git a/vendor/github.com/cheggaaa/pb/pb_x.go b/vendor/github.com/cheggaaa/pb/pb_x.go new file mode 100644 index 000000000..bbbe7c2d6 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/pb_x.go @@ -0,0 +1,108 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly +// +build !appengine + +package pb + +import ( + "errors" + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +var ErrPoolWasStarted = errors.New("Bar pool was started") + +var ( + echoLockMutex sync.Mutex + origTermStatePtr *unix.Termios + tty *os.File +) + +func init() { + 
echoLockMutex.Lock() + defer echoLockMutex.Unlock() + + var err error + tty, err = os.Open("/dev/tty") + if err != nil { + tty = os.Stdin + } +} + +// terminalWidth returns width of the terminal. +func terminalWidth() (int, error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + + fd := int(tty.Fd()) + + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, err + } + + return int(ws.Col), nil +} + +func lockEcho() (quit chan int, err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if origTermStatePtr != nil { + return quit, ErrPoolWasStarted + } + + fd := int(tty.Fd()) + + oldTermStatePtr, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, fmt.Errorf("Can't get terminal settings: %v", err) + } + + oldTermios := *oldTermStatePtr + newTermios := oldTermios + newTermios.Lflag &^= syscall.ECHO + newTermios.Lflag |= syscall.ICANON | syscall.ISIG + newTermios.Iflag |= syscall.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil { + return nil, fmt.Errorf("Can't set terminal settings: %v", err) + } + + quit = make(chan int, 1) + go catchTerminate(quit) + return +} + +func unlockEcho() error { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if origTermStatePtr == nil { + return nil + } + + fd := int(tty.Fd()) + + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil { + return fmt.Errorf("Can't set terminal settings: %v", err) + } + + origTermStatePtr = nil + + return nil +} + +// listen exit signals and restore terminal state +func catchTerminate(quit chan int) { + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) + defer signal.Stop(sig) + select { + case <-quit: + unlockEcho() + case <-sig: + unlockEcho() + } +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/github.com/cheggaaa/pb/pool.go similarity index 84% rename from 
vendor/gopkg.in/cheggaaa/pb.v1/pool.go rename to vendor/github.com/cheggaaa/pb/pool.go index 0b4a4afa8..bc1a13886 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go +++ b/vendor/github.com/cheggaaa/pb/pool.go @@ -3,6 +3,7 @@ package pb import ( + "io" "sync" "time" ) @@ -19,14 +20,19 @@ func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { } type Pool struct { - RefreshRate time.Duration - bars []*ProgressBar - quit chan int - finishOnce sync.Once + Output io.Writer + RefreshRate time.Duration + bars []*ProgressBar + lastBarsCount int + quit chan int + m sync.Mutex + finishOnce sync.Once } // Add progress bars. func (p *Pool) Add(pbs ...*ProgressBar) { + p.m.Lock() + defer p.m.Unlock() for _, bar := range pbs { bar.ManualUpdate = true bar.NotPrint = true diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/github.com/cheggaaa/pb/pool_win.go similarity index 64% rename from vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go rename to vendor/github.com/cheggaaa/pb/pool_win.go index d7a5ace41..63598d378 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go +++ b/vendor/github.com/cheggaaa/pb/pool_win.go @@ -8,13 +8,18 @@ import ( ) func (p *Pool) print(first bool) bool { + p.m.Lock() + defer p.m.Unlock() var out string if !first { coords, err := getCursorPos() if err != nil { log.Panic(err) } - coords.Y -= int16(len(p.bars)) + coords.Y -= int16(p.lastBarsCount) + if coords.Y < 0 { + coords.Y = 0 + } coords.X = 0 err = setCursorPos(coords) @@ -24,12 +29,17 @@ func (p *Pool) print(first bool) bool { } isFinished := true for _, bar := range p.bars { - if !bar.isFinish { + if !bar.IsFinished() { isFinished = false } bar.Update() out += fmt.Sprintf("\r%s\n", bar.String()) } - fmt.Print(out) + if p.Output != nil { + fmt.Fprint(p.Output, out) + } else { + fmt.Print(out) + } + p.lastBarsCount = len(p.bars) return isFinished } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/github.com/cheggaaa/pb/pool_x.go similarity index 59% rename from 
vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go rename to vendor/github.com/cheggaaa/pb/pool_x.go index d95b71d87..a8ae14d2f 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go +++ b/vendor/github.com/cheggaaa/pb/pool_x.go @@ -5,18 +5,25 @@ package pb import "fmt" func (p *Pool) print(first bool) bool { + p.m.Lock() + defer p.m.Unlock() var out string if !first { - out = fmt.Sprintf("\033[%dA", len(p.bars)) + out = fmt.Sprintf("\033[%dA", p.lastBarsCount) } isFinished := true for _, bar := range p.bars { - if !bar.isFinish { + if !bar.IsFinished() { isFinished = false } bar.Update() out += fmt.Sprintf("\r%s\n", bar.String()) } - fmt.Print(out) + if p.Output != nil { + fmt.Fprint(p.Output, out) + } else { + fmt.Print(out) + } + p.lastBarsCount = len(p.bars) return isFinished } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go b/vendor/github.com/cheggaaa/pb/reader.go similarity index 100% rename from vendor/gopkg.in/cheggaaa/pb.v1/reader.go rename to vendor/github.com/cheggaaa/pb/reader.go diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/github.com/cheggaaa/pb/runecount.go similarity index 90% rename from vendor/gopkg.in/cheggaaa/pb.v1/runecount.go rename to vendor/github.com/cheggaaa/pb/runecount.go index d52edd365..c617c55ec 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go +++ b/vendor/github.com/cheggaaa/pb/runecount.go @@ -11,7 +11,7 @@ var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") func escapeAwareRuneCountInString(s string) int { n := runewidth.StringWidth(s) for _, sm := range ctrlFinder.FindAllString(s, -1) { - n -= len(sm) + n -= runewidth.StringWidth(sm) } return n } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go b/vendor/github.com/cheggaaa/pb/termios_bsd.go similarity index 100% rename from vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go rename to vendor/github.com/cheggaaa/pb/termios_bsd.go diff --git a/vendor/github.com/cheggaaa/pb/termios_sysv.go b/vendor/github.com/cheggaaa/pb/termios_sysv.go new file mode 
100644 index 000000000..b10f61859 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/termios_sysv.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris +// +build !appengine + +package pb + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go deleted file mode 100644 index d5aeff793..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/format.go +++ /dev/null @@ -1,87 +0,0 @@ -package pb - -import ( - "fmt" - "strings" - "time" -) - -type Units int - -const ( - // U_NO are default units, they represent a simple value and are not formatted at all. - U_NO Units = iota - // U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...) - U_BYTES - // U_DURATION units are formatted in a human readable way (3h14m15s) - U_DURATION -) - -func Format(i int64) *formatter { - return &formatter{n: i} -} - -type formatter struct { - n int64 - unit Units - width int - perSec bool -} - -func (f *formatter) Value(n int64) *formatter { - f.n = n - return f -} - -func (f *formatter) To(unit Units) *formatter { - f.unit = unit - return f -} - -func (f *formatter) Width(width int) *formatter { - f.width = width - return f -} - -func (f *formatter) PerSec() *formatter { - f.perSec = true - return f -} - -func (f *formatter) String() (out string) { - switch f.unit { - case U_BYTES: - out = formatBytes(f.n) - case U_DURATION: - d := time.Duration(f.n) - if d > time.Hour*24 { - out = fmt.Sprintf("%dd", d/24/time.Hour) - d -= (d / time.Hour / 24) * (time.Hour * 24) - } - out = fmt.Sprintf("%s%v", out, d) - default: - out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) - } - if f.perSec { - out += "/s" - } - return -} - -// Convert bytes to human readable string. 
Like a 2 MB, 64.2 KB, 52 B -func formatBytes(i int64) (result string) { - switch { - case i > (1024 * 1024 * 1024 * 1024): - result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024) - case i > (1024 * 1024 * 1024): - result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024) - case i > (1024 * 1024): - result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024) - case i > 1024: - result = fmt.Sprintf("%.02f KB", float64(i)/1024) - default: - result = fmt.Sprintf("%d B", i) - } - result = strings.Trim(result, " ") - return -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go deleted file mode 100644 index c06097b43..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd dragonfly -// +build !appengine - -package pb - -import "syscall" - -const sysIoctl = syscall.SYS_IOCTL diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go deleted file mode 100644 index b7d461e17..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build solaris -// +build !appengine - -package pb - -const sysIoctl = 54 diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go deleted file mode 100644 index 12e6fe6e2..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go +++ /dev/null @@ -1,110 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly -// +build !appengine - -package pb - -import ( - "errors" - "fmt" - "os" - "os/signal" - "runtime" - "sync" - "syscall" - "unsafe" -) - -const ( - TIOCGWINSZ = 0x5413 - TIOCGWINSZ_OSX = 1074295912 -) - -var tty *os.File - -var ErrPoolWasStarted = errors.New("Bar pool was started") - -var echoLocked bool -var echoLockMutex sync.Mutex - -func init() { - var err error - tty, err = os.Open("/dev/tty") - if err != nil { - tty = os.Stdin - } -} - -// terminalWidth returns width of the 
terminal. -func terminalWidth() (int, error) { - w := new(window) - tio := syscall.TIOCGWINSZ - if runtime.GOOS == "darwin" { - tio = TIOCGWINSZ_OSX - } - res, _, err := syscall.Syscall(sysIoctl, - tty.Fd(), - uintptr(tio), - uintptr(unsafe.Pointer(w)), - ) - if int(res) == -1 { - return 0, err - } - return int(w.Col), nil -} - -var oldState syscall.Termios - -func lockEcho() (quit chan int, err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if echoLocked { - err = ErrPoolWasStarted - return - } - echoLocked = true - - fd := tty.Fd() - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't get terminal settings: %v", e) - return - } - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings: %v", e) - return - } - quit = make(chan int, 1) - go catchTerminate(quit) - return -} - -func unlockEcho() (err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if !echoLocked { - return - } - echoLocked = false - fd := tty.Fd() - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings") - } - return -} - -// listen exit signals and restore terminal state -func catchTerminate(quit chan int) { - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) - defer signal.Stop(sig) - select { - case <-quit: - unlockEcho() - case <-sig: - unlockEcho() - } -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go deleted file mode 100644 index ebb3fe87c..000000000 --- 
a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build linux solaris -// +build !appengine - -package pb - -const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/vendor.json b/vendor/vendor.json index e47378fdf..e536cbb7d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -565,6 +565,12 @@ "path": "github.com/biogo/hts/bgzf", "revision": "50da7d4131a3b5c9d063932461cab4d1fafb20b0" }, + { + "checksumSHA1": "ymc5+iJ+1ipls3ihqPdzMjFYCqo=", + "path": "github.com/cheggaaa/pb", + "revision": "18d384da9bdc1e5a08fc2a62a494c321d9ae74ea", + "revisionTime": "2017-12-14T13:20:59Z" + }, { "checksumSHA1": "Lf3uUXTkKK5DJ37BxQvxO1Fq+K8=", "comment": "v1.0.0-3-g6d21280", @@ -1493,12 +1499,6 @@ "path": "google.golang.org/cloud/internal", "revision": "5a3b06f8b5da3b7c3a93da43163b872c86c509ef" }, - { - "checksumSHA1": "w3z2xM4+RT/OWvVusDgNXZZEEkE=", - "path": "gopkg.in/cheggaaa/pb.v1", - "revision": "d7e6ca3010b6f084d8056847f55d7f572f180678", - "revisionTime": "2016-11-21T09:29:06Z" - }, { "checksumSHA1": "gY2M/3SCxqgrPz+P/N6JQ+m/wnA=", "path": "gopkg.in/xmlpath.v2", From 5a3e98b529be462209badd511f2686fa6de8ee0f Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 3 Jan 2018 20:31:49 -0600 Subject: [PATCH 052/251] Updated the testcases in common/download_test.go to pass a non-nil progress-bar due to the removal of a pointer type in commit ed2e341b7d7f49a063dd5018701b4ae548b8ec14 from yesterday. 
--- common/download.go | 15 +++++++++++++-- common/download_test.go | 24 +++++++++++++----------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/common/download.go b/common/download.go index e5ebe305f..cfa2d8849 100644 --- a/common/download.go +++ b/common/download.go @@ -516,9 +516,20 @@ func (d *FileDownloader) toPath(base string, uri url.URL) (string, error) { result = path.Join(uri.Host, uri.Path) // semi-absolute path (current drive letter) - // -- file:///absolute/path -> /absolute/path + // -- file:///absolute/path -> drive:/absolute/path } else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") { - result = path.Join(filepath.VolumeName(base), uri.Path) + apath := uri.Path + components := strings.Split(apath, "/") + volume := filepath.VolumeName(base) + + // semi-absolute absolute path (includes volume letter) + // -- file://drive:/path -> drive:/absolute/path + if len(components) > 1 && strings.HasSuffix(components[1], ":") { + volume = components[1] + apath = path.Join(components[2:]...) + } + + result = path.Join(volume, apath) // relative path -- file://./relative/path -> ./relative/path } else if uri.Host == "." 
{ diff --git a/common/download_test.go b/common/download_test.go index 50bdfa971..db31fe152 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -12,6 +12,8 @@ import ( "runtime" "strings" "testing" + + "github.com/cheggaaa/pb" ) func TestDownloadClientVerifyChecksum(t *testing.T) { @@ -36,7 +38,7 @@ func TestDownloadClientVerifyChecksum(t *testing.T) { Checksum: checksum, } - d := NewDownloadClient(config, nil) + d := NewDownloadClient(config, *pb.New64(0)) result, err := d.VerifyChecksum(tf.Name()) if err != nil { t.Fatalf("Verify err: %s", err) @@ -59,7 +61,7 @@ func TestDownloadClient_basic(t *testing.T) { Url: ts.URL + "/basic.txt", TargetPath: tf.Name(), CopyFile: true, - }, nil) + }, *pb.New64(0)) path, err := client.Get() if err != nil { @@ -95,7 +97,7 @@ func TestDownloadClient_checksumBad(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }, nil) + }, *pb.New64(0)) if _, err := client.Get(); err == nil { t.Fatal("should error") } @@ -120,7 +122,7 @@ func TestDownloadClient_checksumGood(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }, nil) + }, *pb.New64(0)) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -151,7 +153,7 @@ func TestDownloadClient_checksumNoDownload(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }, nil) + }, *pb.New64(0)) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -190,7 +192,7 @@ func TestDownloadClient_resume(t *testing.T) { Url: ts.URL, TargetPath: tf.Name(), CopyFile: true, - }, nil) + }, *pb.New64(0)) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -250,7 +252,7 @@ func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { CopyFile: true, } - client := NewDownloadClient(config, nil) + client := NewDownloadClient(config, *pb.New64(0)) _, err = client.Get() if err != nil { t.Fatal(err) @@ -282,7 +284,7 @@ func TestDownloadClient_setsUserAgent(t *testing.T) 
{ CopyFile: true, } - client := NewDownloadClient(config, nil) + client := NewDownloadClient(config, *pb.New64(0)) _, err = client.Get() if err != nil { t.Fatal(err) @@ -381,12 +383,12 @@ func TestDownloadFileUrl(t *testing.T) { CopyFile: false, } - client := NewDownloadClient(config, nil) + client := NewDownloadClient(config, *pb.New64(0)) // Verify that we fail to match the checksum _, err = client.Get() if err.Error() != "checksums didn't match expected: 6e6f7065" { - t.Fatalf("Unexpected failure; expected checksum not to match") + t.Fatalf("Unexpected failure; expected checksum not to match. Error was \"%v\"", err) } if _, err = os.Stat(sourcePath); err != nil { @@ -412,7 +414,7 @@ func SimulateFileUriDownload(t *testing.T, uri string) (string, error) { } // go go go - client := NewDownloadClient(config, nil) + client := NewDownloadClient(config, *pb.New64(0)) path, err := client.Get() // ignore any non-important checksum errors if it's not a unc path From 5d97b105a8b6baf2cf3004e3765f5df66113a1a1 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sat, 6 Jan 2018 19:32:18 -0600 Subject: [PATCH 053/251] Removed implementation of the ftp protocol and the usage of cheggaaa's progress-bar as suggested by @SwampDragons. Replaced some of the old smoke-tests that were based on the ftp-protocol non-existing with a "non-existent://" protocol that's guaranteed to not exist. 
--- builder/virtualbox/ovf/config_test.go | 11 + builder/vmware/iso/step_create_vmx.go | 2 +- common/config.go | 2 +- common/config_test.go | 10 +- common/download.go | 196 +-- common/download_test.go | 22 +- common/step_download.go | 44 +- packer/core.go | 2 +- vendor/github.com/cheggaaa/pb/LICENSE | 12 - vendor/github.com/cheggaaa/pb/README.md | 179 --- vendor/github.com/cheggaaa/pb/format.go | 118 -- vendor/github.com/cheggaaa/pb/pb.go | 469 ------- vendor/github.com/cheggaaa/pb/pb_win.go | 141 -- vendor/github.com/cheggaaa/pb/pb_x.go | 108 -- vendor/github.com/cheggaaa/pb/pool.go | 82 -- vendor/github.com/cheggaaa/pb/pool_win.go | 45 - vendor/github.com/cheggaaa/pb/pool_x.go | 29 - vendor/github.com/cheggaaa/pb/reader.go | 25 - vendor/github.com/cheggaaa/pb/runecount.go | 17 - vendor/github.com/cheggaaa/pb/termios_bsd.go | 9 - vendor/github.com/cheggaaa/pb/termios_sysv.go | 13 - vendor/github.com/jlaffaye/ftp/LICENSE | 13 - vendor/github.com/jlaffaye/ftp/README.md | 17 - vendor/github.com/jlaffaye/ftp/ftp.go | 671 --------- vendor/github.com/jlaffaye/ftp/status.go | 106 -- vendor/github.com/mattn/go-runewidth/LICENSE | 21 - .../github.com/mattn/go-runewidth/README.mkd | 27 - .../mattn/go-runewidth/runewidth.go | 1223 ----------------- .../mattn/go-runewidth/runewidth_js.go | 8 - .../mattn/go-runewidth/runewidth_posix.go | 77 -- .../mattn/go-runewidth/runewidth_windows.go | 25 - vendor/vendor.json | 28 - 32 files changed, 60 insertions(+), 3692 deletions(-) delete mode 100644 vendor/github.com/cheggaaa/pb/LICENSE delete mode 100644 vendor/github.com/cheggaaa/pb/README.md delete mode 100644 vendor/github.com/cheggaaa/pb/format.go delete mode 100644 vendor/github.com/cheggaaa/pb/pb.go delete mode 100644 vendor/github.com/cheggaaa/pb/pb_win.go delete mode 100644 vendor/github.com/cheggaaa/pb/pb_x.go delete mode 100644 vendor/github.com/cheggaaa/pb/pool.go delete mode 100644 vendor/github.com/cheggaaa/pb/pool_win.go delete mode 100644 
vendor/github.com/cheggaaa/pb/pool_x.go delete mode 100644 vendor/github.com/cheggaaa/pb/reader.go delete mode 100644 vendor/github.com/cheggaaa/pb/runecount.go delete mode 100644 vendor/github.com/cheggaaa/pb/termios_bsd.go delete mode 100644 vendor/github.com/cheggaaa/pb/termios_sysv.go delete mode 100644 vendor/github.com/jlaffaye/ftp/LICENSE delete mode 100644 vendor/github.com/jlaffaye/ftp/README.md delete mode 100644 vendor/github.com/jlaffaye/ftp/ftp.go delete mode 100644 vendor/github.com/jlaffaye/ftp/status.go delete mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE delete mode 100644 vendor/github.com/mattn/go-runewidth/README.mkd delete mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go delete mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go delete mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go delete mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go diff --git a/builder/virtualbox/ovf/config_test.go b/builder/virtualbox/ovf/config_test.go index e9c536e54..baba657a8 100644 --- a/builder/virtualbox/ovf/config_test.go +++ b/builder/virtualbox/ovf/config_test.go @@ -76,6 +76,17 @@ func TestNewConfig_sourcePath(t *testing.T) { t.Fatalf("Nonexistant file should throw a validation error!") } + // Bad + c = testConfig(t) + c["source_path"] = "nonexistent-protocol://i/dont/exist" + _, warns, err = NewConfig(c) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatalf("should error") + } + // Good tf := getTempFile(t) defer os.Remove(tf.Name()) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 3f3644e70..292a6a6e7 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -44,7 +44,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) // Convert the iso_path into a path relative to the .vmx file if possible - if 
relativeIsoPath, err := filepath.Rel(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { + if relativeIsoPath, err := filepath.Abs(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { isoPath = relativeIsoPath } diff --git a/common/config.go b/common/config.go index 0e97f6a0e..c8c99a31e 100644 --- a/common/config.go +++ b/common/config.go @@ -48,7 +48,7 @@ func ChooseString(vals ...string) string { func DownloadableURL(original string) (string, error) { // Verify that the scheme is something we support in our common downloader. - supported := []string{"file", "http", "https", "ftp", "smb"} + supported := []string{"file", "http", "https", "smb"} found := false for _, s := range supported { if strings.HasPrefix(strings.ToLower(original), s+"://") { diff --git a/common/config_test.go b/common/config_test.go index 3b1aff8cf..f9b1748ce 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -43,6 +43,12 @@ func TestDownloadableURL(t *testing.T) { t.Fatalf("expected err : %s", err) } + // Invalid: unsupported scheme + _, err = DownloadableURL("nonexistent-protocol://host.com/path") + if err == nil { + t.Fatalf("expected err : %s", err) + } + // Valid: http u, err := DownloadableURL("HTTP://packer.io/path") if err != nil { @@ -190,7 +196,9 @@ func TestDownloadableURL_FilePaths(t *testing.T) { t.Fatalf("err: %s", err) } - expected := "file://" + strings.Replace(tfPath, `\`, `/`, -1) + expected := fmt.Sprintf("%s%s", + filePrefix, + strings.Replace(tfPath, `\`, `/`, -1)) if u != expected { t.Fatalf("unexpected: %#v != %#v", u, expected) } diff --git a/common/download.go b/common/download.go index cfa2d8849..27e9b32a8 100644 --- a/common/download.go +++ b/common/download.go @@ -20,8 +20,6 @@ import ( // imports related to each Downloader implementation import ( - "github.com/cheggaaa/pb" - "github.com/jlaffaye/ftp" "io" "net/http" "path/filepath" @@ -83,24 +81,24 @@ func HashForType(t string) hash.Hash { // NewDownloadClient returns a 
new DownloadClient for the given // configuration. -func NewDownloadClient(c *DownloadConfig, bar pb.ProgressBar) *DownloadClient { +func NewDownloadClient(c *DownloadConfig) *DownloadClient { const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */ // Create downloader map if it hasn't been specified already. if c.DownloaderMap == nil { + // XXX: Make sure you add any new protocols you implement + // to the DownloadableURL implementation in config.go! c.DownloaderMap = map[string]Downloader{ - "file": &FileDownloader{progress: &bar, bufferSize: nil}, - "ftp": &FTPDownloader{progress: &bar, userInfo: url.UserPassword("anonymous", "anonymous@"), mtu: mtu}, - "http": &HTTPDownloader{progress: &bar, userAgent: c.UserAgent}, - "https": &HTTPDownloader{progress: &bar, userAgent: c.UserAgent}, - "smb": &SMBDownloader{progress: &bar, bufferSize: nil}, + "file": &FileDownloader{bufferSize: nil}, + "http": &HTTPDownloader{userAgent: c.UserAgent}, + "https": &HTTPDownloader{userAgent: c.UserAgent}, + "smb": &SMBDownloader{bufferSize: nil}, } } return &DownloadClient{config: c} } -// A downloader implements the ability to transfer a file, and cancel or resume -// it. +// A downloader implements the ability to transfer, cancel, or resume a file. type Downloader interface { Resume() Cancel() @@ -209,6 +207,14 @@ func (d *DownloadClient) Get() (string, error) { return finalPath, err } +func (d *DownloadClient) PercentProgress() int { + if (d.downloader == nil) { + return -1 + } + + return int((float64(d.downloader.Progress()) / float64(d.downloader.Total())) * 100) +} + // VerifyChecksum tests that the path matches the checksum for the // download. 
func (d *DownloadClient) VerifyChecksum(path string) (bool, error) { @@ -234,8 +240,6 @@ type HTTPDownloader struct { current uint64 total uint64 userAgent string - - progress *pb.ProgressBar } func (d *HTTPDownloader) Cancel() { @@ -300,10 +304,6 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { d.total = d.current + uint64(resp.ContentLength) - d.progress.Total = int64(d.total) - progressBar := d.progress.Start() - progressBar.Set64(int64(d.current)) - var buffer [4096]byte for { n, err := resp.Body.Read(buffer[:]) @@ -312,7 +312,6 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } d.current += uint64(n) - progressBar.Set64(int64(d.current)) if _, werr := dst.Write(buffer[:n]); werr != nil { return werr @@ -322,7 +321,6 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { break } } - progressBar.Finish() return nil } @@ -334,152 +332,6 @@ func (d *HTTPDownloader) Total() uint64 { return d.total } -// FTPDownloader is an implementation of Downloader that downloads -// files over FTP. 
-type FTPDownloader struct { - userInfo *url.Userinfo - mtu uint - - active bool - current uint64 - total uint64 - - progress *pb.ProgressBar -} - -func (d *FTPDownloader) Progress() uint64 { - return d.current -} - -func (d *FTPDownloader) Total() uint64 { - return d.total -} - -func (d *FTPDownloader) Cancel() { - d.active = false -} - -func (d *FTPDownloader) Resume() { - // TODO: Implement -} - -func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error { - var userinfo *url.Userinfo - - userinfo = d.userInfo - d.active = false - - // check the uri is correct - if src == nil || src.Scheme != "ftp" { - return fmt.Errorf("Unexpected uri scheme: %s", src.Scheme) - } - uri := src - - // connect to ftp server - var cli *ftp.ServerConn - - log.Printf("Starting download over FTP: %s : %s\n", uri.Host, uri.Path) - cli, err := ftp.Dial(uri.Host) - if err != nil { - return nil - } - defer cli.Quit() - - // handle authentication - if uri.User != nil { - userinfo = uri.User - } - - pass, ok := userinfo.Password() - if !ok { - pass = "ftp@" - } - - log.Printf("Authenticating to FTP server: %s : %s\n", userinfo.Username(), pass) - err = cli.Login(userinfo.Username(), pass) - if err != nil { - return err - } - - // locate specified path - p := path.Dir(uri.Path) - - log.Printf("Changing to FTP directory : %s\n", p) - err = cli.ChangeDir(p) - if err != nil { - return nil - } - - curpath, err := cli.CurrentDir() - if err != nil { - return err - } - log.Printf("Current FTP directory : %s\n", curpath) - - // collect stats about the specified file - var name string - var entry *ftp.Entry - - _, name = path.Split(uri.Path) - entry = nil - - entries, err := cli.List(curpath) - for _, e := range entries { - if e.Type == ftp.EntryTypeFile && e.Name == name { - entry = e - break - } - } - - if entry == nil { - return fmt.Errorf("Unable to find file: %s", uri.Path) - } - log.Printf("Found file : %s : %v bytes\n", entry.Name, entry.Size) - - d.current = 0 - d.total = entry.Size - 
- d.progress.Total = int64(d.total) - progressBar := d.progress.Start() - - // download specified file - d.active = true - reader, err := cli.RetrFrom(uri.Path, d.current) - if err != nil { - return nil - } - - // do it in a goro so that if someone wants to cancel it, they can - errch := make(chan error) - go func(d *FTPDownloader, r io.Reader, w io.Writer, e chan error) { - for d.active { - n, err := io.CopyN(w, r, int64(d.mtu)) - if err != nil { - break - } - - d.current += uint64(n) - progressBar.Set64(int64(d.current)) - } - d.active = false - e <- err - }(d, reader, dst, errch) - - // spin until it's done - err = <-errch - - progressBar.Finish() - reader.Close() - - if err == nil && d.current != d.total { - err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.current, d.total) - } - - // log out - cli.Logout() - return err -} - // FileDownloader is an implementation of Downloader that downloads // files using the regular filesystem. type FileDownloader struct { @@ -488,8 +340,6 @@ type FileDownloader struct { active bool current uint64 total uint64 - - progress *pb.ProgressBar } func (d *FileDownloader) Progress() uint64 { @@ -580,9 +430,6 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { } d.total = uint64(fi.Size()) - d.progress.Total = int64(d.total) - progressBar := d.progress.Start() - // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { var n int64 @@ -590,7 +437,6 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { d.active = false d.current += uint64(n) - progressBar.Set64(int64(d.current)) // use a goro in case someone else wants to enable cancel/resume } else { @@ -603,7 +449,6 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { } d.current += uint64(n) - progressBar.Set64(int64(d.current)) } d.active = false e <- err @@ -612,7 +457,6 @@ func (d *FileDownloader) Download(dst *os.File, src *url.URL) error { // ...and we spin until it's done err = <-errch } - progressBar.Finish() f.Close() return err } @@ -625,8 +469,6 @@ type SMBDownloader struct { active bool current uint64 total uint64 - - progress *pb.ProgressBar } func (d *SMBDownloader) Progress() uint64 { @@ -699,9 +541,6 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { } d.total = uint64(fi.Size()) - d.progress.Total = int64(d.total) - progressBar := d.progress.Start() - // no bufferSize specified, so copy synchronously. 
if d.bufferSize == nil { var n int64 @@ -709,7 +548,6 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { d.active = false d.current += uint64(n) - progressBar.Set64(int64(d.current)) // use a goro in case someone else wants to enable cancel/resume } else { @@ -722,7 +560,6 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { } d.current += uint64(n) - progressBar.Set64(int64(d.current)) } d.active = false e <- err @@ -731,7 +568,6 @@ func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error { // ...and as usual we spin until it's done err = <-errch } - progressBar.Finish() f.Close() return err } diff --git a/common/download_test.go b/common/download_test.go index db31fe152..beac9cbab 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -12,8 +12,6 @@ import ( "runtime" "strings" "testing" - - "github.com/cheggaaa/pb" ) func TestDownloadClientVerifyChecksum(t *testing.T) { @@ -38,7 +36,7 @@ func TestDownloadClientVerifyChecksum(t *testing.T) { Checksum: checksum, } - d := NewDownloadClient(config, *pb.New64(0)) + d := NewDownloadClient(config) result, err := d.VerifyChecksum(tf.Name()) if err != nil { t.Fatalf("Verify err: %s", err) @@ -61,7 +59,7 @@ func TestDownloadClient_basic(t *testing.T) { Url: ts.URL + "/basic.txt", TargetPath: tf.Name(), CopyFile: true, - }, *pb.New64(0)) + }) path, err := client.Get() if err != nil { @@ -97,7 +95,7 @@ func TestDownloadClient_checksumBad(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }, *pb.New64(0)) + }) if _, err := client.Get(); err == nil { t.Fatal("should error") } @@ -122,7 +120,7 @@ func TestDownloadClient_checksumGood(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: true, - }, *pb.New64(0)) + }) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -153,7 +151,7 @@ func TestDownloadClient_checksumNoDownload(t *testing.T) { Hash: HashForType("md5"), Checksum: checksum, CopyFile: 
true, - }, *pb.New64(0)) + }) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -192,7 +190,7 @@ func TestDownloadClient_resume(t *testing.T) { Url: ts.URL, TargetPath: tf.Name(), CopyFile: true, - }, *pb.New64(0)) + }) path, err := client.Get() if err != nil { t.Fatalf("err: %s", err) @@ -252,7 +250,7 @@ func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { CopyFile: true, } - client := NewDownloadClient(config, *pb.New64(0)) + client := NewDownloadClient(config) _, err = client.Get() if err != nil { t.Fatal(err) @@ -284,7 +282,7 @@ func TestDownloadClient_setsUserAgent(t *testing.T) { CopyFile: true, } - client := NewDownloadClient(config, *pb.New64(0)) + client := NewDownloadClient(config) _, err = client.Get() if err != nil { t.Fatal(err) @@ -383,7 +381,7 @@ func TestDownloadFileUrl(t *testing.T) { CopyFile: false, } - client := NewDownloadClient(config, *pb.New64(0)) + client := NewDownloadClient(config) // Verify that we fail to match the checksum _, err = client.Get() @@ -414,7 +412,7 @@ func SimulateFileUriDownload(t *testing.T, uri string) (string, error) { } // go go go - client := NewDownloadClient(config, *pb.New64(0)) + client := NewDownloadClient(config) path, err := client.Get() // ignore any non-important checksum errors if it's not a unc path diff --git a/common/step_download.go b/common/step_download.go index f483eaba6..765a07d23 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -7,7 +7,6 @@ import ( "log" "time" - "github.com/cheggaaa/pb" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) @@ -62,10 +61,6 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { ui.Say(fmt.Sprintf("Downloading or copying %s", s.Description)) - // Get a default-looking progress bar and connect it to the ui. 
- bar := GetDefaultProgressBar() - bar.Callback = ui.Message - // First try to use any already downloaded file // If it fails, proceed to regualar download logic @@ -99,7 +94,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { } downloadConfigs[i] = config - if match, _ := NewDownloadClient(config, bar).VerifyChecksum(config.TargetPath); match { + if match, _ := NewDownloadClient(config).VerifyChecksum(config.TargetPath); match { ui.Message(fmt.Sprintf("Found already downloaded, initial checksum matched, no download needed: %s", url)) finalPath = config.TargetPath break @@ -141,32 +136,10 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { func (s *StepDownload) Cleanup(multistep.StateBag) {} -func GetDefaultProgressBar() pb.ProgressBar { - bar := pb.New64(0) - bar.ShowPercent = true - bar.ShowCounters = true - bar.ShowSpeed = false - bar.ShowBar = true - bar.ShowTimeLeft = false - bar.ShowFinalTime = false - bar.SetUnits(pb.U_BYTES) - bar.Format("[=>-]") - bar.SetRefreshRate(1 * time.Second) - bar.SetWidth(25) - - return *bar -} - func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag) (string, error, bool) { var path string ui := state.Get("ui").(packer.Ui) - - // Get a default looking progress bar and connect it to the ui. 
- bar := GetDefaultProgressBar() - bar.Callback = ui.Message - - // Create download client with config and progress bar - download := NewDownloadClient(config, bar) + download := NewDownloadClient(config) downloadCompleteCh := make(chan error, 1) go func() { @@ -175,19 +148,24 @@ func (s *StepDownload) download(config *DownloadConfig, state multistep.StateBag downloadCompleteCh <- err }() + progressTicker := time.NewTicker(5 * time.Second) + defer progressTicker.Stop() + for { select { case err := <-downloadCompleteCh: - bar.Finish() - if err != nil { return "", err, true } - return path, nil, true + return path, nil, true + case <-progressTicker.C: + progress := download.PercentProgress() + if progress >= 0 { + ui.Message(fmt.Sprintf("Download progress: %d%%", progress)) + } case <-time.After(1 * time.Second): if _, ok := state.GetOk(multistep.StateCancelled); ok { - bar.Finish() ui.Say("Interrupt received. Cancelling download...") return "", nil, false } diff --git a/packer/core.go b/packer/core.go index c8ac2cfb7..7da3e4510 100644 --- a/packer/core.go +++ b/packer/core.go @@ -67,7 +67,7 @@ func NewCore(c *CoreConfig) (*Core, error) { return nil, err } - // Go through and interpolate all the build names. We should be able + // Go through and interpolate all the build names. We shuld be able // to do this at this point with the variables. result.builds = make(map[string]*template.Builder) for _, b := range c.Template.Builders { diff --git a/vendor/github.com/cheggaaa/pb/LICENSE b/vendor/github.com/cheggaaa/pb/LICENSE deleted file mode 100644 index 511970333..000000000 --- a/vendor/github.com/cheggaaa/pb/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012-2015, Sergey Cherepanov -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/cheggaaa/pb/README.md b/vendor/github.com/cheggaaa/pb/README.md deleted file mode 100644 index 948545110..000000000 --- a/vendor/github.com/cheggaaa/pb/README.md +++ /dev/null @@ -1,179 +0,0 @@ -# Terminal progress bar for Go - -[![Join the chat at https://gitter.im/cheggaaa/pb](https://badges.gitter.im/cheggaaa/pb.svg)](https://gitter.im/cheggaaa/pb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -Simple progress bar for console programs. 
- -Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta) - -## Installation - -``` -go get gopkg.in/cheggaaa/pb.v1 -``` - -## Usage - -```Go -package main - -import ( - "gopkg.in/cheggaaa/pb.v1" - "time" -) - -func main() { - count := 100000 - bar := pb.StartNew(count) - for i := 0; i < count; i++ { - bar.Increment() - time.Sleep(time.Millisecond) - } - bar.FinishPrint("The End!") -} - -``` - -Result will be like this: - -``` -> go run test.go -37158 / 100000 [================>_______________________________] 37.16% 1m11s -``` - -## Customization - -```Go -// create bar -bar := pb.New(count) - -// refresh info every second (default 200ms) -bar.SetRefreshRate(time.Second) - -// show percents (by default already true) -bar.ShowPercent = true - -// show bar (by default already true) -bar.ShowBar = true - -// no counters -bar.ShowCounters = false - -// show "time left" -bar.ShowTimeLeft = true - -// show average speed -bar.ShowSpeed = true - -// sets the width of the progress bar -bar.SetWidth(80) - -// sets the width of the progress bar, but if terminal size smaller will be ignored -bar.SetMaxWidth(80) - -// convert output to readable format (like KB, MB) -bar.SetUnits(pb.U_BYTES) - -// and start -bar.Start() -``` - -## Progress bar for IO Operations - -```go -// create and start bar -bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) -bar.Start() - -// my io.Reader -r := myReader - -// my io.Writer -w := myWriter - -// create proxy reader -reader := bar.NewProxyReader(r) - -// and copy from pb reader -io.Copy(w, reader) - -``` - -```go -// create and start bar -bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) -bar.Start() - -// my io.Reader -r := myReader - -// my io.Writer -w := myWriter - -// create multi writer -writer := io.MultiWriter(w, bar) - -// and copy -io.Copy(writer, r) - -bar.Finish() -``` - -## Custom Progress Bar Look-and-feel - -```go -bar.Format("<.- >") -``` - -## Multiple Progress Bars (experimental and unstable) - -Do 
not print to terminal while pool is active. - -```go -package main - -import ( - "math/rand" - "sync" - "time" - - "gopkg.in/cheggaaa/pb.v1" -) - -func main() { - // create bars - first := pb.New(200).Prefix("First ") - second := pb.New(200).Prefix("Second ") - third := pb.New(200).Prefix("Third ") - // start pool - pool, err := pb.StartPool(first, second, third) - if err != nil { - panic(err) - } - // update bars - wg := new(sync.WaitGroup) - for _, bar := range []*pb.ProgressBar{first, second, third} { - wg.Add(1) - go func(cb *pb.ProgressBar) { - for n := 0; n < 200; n++ { - cb.Increment() - time.Sleep(time.Millisecond * time.Duration(rand.Intn(100))) - } - cb.Finish() - wg.Done() - }(bar) - } - wg.Wait() - // close pool - pool.Stop() -} -``` - -The result will be as follows: - -``` -$ go run example/multiple.go -First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s -Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s -Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s -``` diff --git a/vendor/github.com/cheggaaa/pb/format.go b/vendor/github.com/cheggaaa/pb/format.go deleted file mode 100644 index 0723561c2..000000000 --- a/vendor/github.com/cheggaaa/pb/format.go +++ /dev/null @@ -1,118 +0,0 @@ -package pb - -import ( - "fmt" - "time" -) - -type Units int - -const ( - // U_NO are default units, they represent a simple value and are not formatted at all. - U_NO Units = iota - // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...) - U_BYTES - // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...) 
- U_BYTES_DEC - // U_DURATION units are formatted in a human readable way (3h14m15s) - U_DURATION -) - -const ( - KiB = 1024 - MiB = 1048576 - GiB = 1073741824 - TiB = 1099511627776 - - KB = 1e3 - MB = 1e6 - GB = 1e9 - TB = 1e12 -) - -func Format(i int64) *formatter { - return &formatter{n: i} -} - -type formatter struct { - n int64 - unit Units - width int - perSec bool -} - -func (f *formatter) To(unit Units) *formatter { - f.unit = unit - return f -} - -func (f *formatter) Width(width int) *formatter { - f.width = width - return f -} - -func (f *formatter) PerSec() *formatter { - f.perSec = true - return f -} - -func (f *formatter) String() (out string) { - switch f.unit { - case U_BYTES: - out = formatBytes(f.n) - case U_BYTES_DEC: - out = formatBytesDec(f.n) - case U_DURATION: - out = formatDuration(f.n) - default: - out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) - } - if f.perSec { - out += "/s" - } - return -} - -// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B -func formatBytes(i int64) (result string) { - switch { - case i >= TiB: - result = fmt.Sprintf("%.02f TiB", float64(i)/TiB) - case i >= GiB: - result = fmt.Sprintf("%.02f GiB", float64(i)/GiB) - case i >= MiB: - result = fmt.Sprintf("%.02f MiB", float64(i)/MiB) - case i >= KiB: - result = fmt.Sprintf("%.02f KiB", float64(i)/KiB) - default: - result = fmt.Sprintf("%d B", i) - } - return -} - -// Convert bytes to base-10 human readable string. 
Like 2 MB, 64.2 KB, 52 B -func formatBytesDec(i int64) (result string) { - switch { - case i >= TB: - result = fmt.Sprintf("%.02f TB", float64(i)/TB) - case i >= GB: - result = fmt.Sprintf("%.02f GB", float64(i)/GB) - case i >= MB: - result = fmt.Sprintf("%.02f MB", float64(i)/MB) - case i >= KB: - result = fmt.Sprintf("%.02f KB", float64(i)/KB) - default: - result = fmt.Sprintf("%d B", i) - } - return -} - -func formatDuration(n int64) (result string) { - d := time.Duration(n) - if d > time.Hour*24 { - result = fmt.Sprintf("%dd", d/24/time.Hour) - d -= (d / time.Hour / 24) * (time.Hour * 24) - } - result = fmt.Sprintf("%s%v", result, d) - return -} diff --git a/vendor/github.com/cheggaaa/pb/pb.go b/vendor/github.com/cheggaaa/pb/pb.go deleted file mode 100644 index 19eb4d1a9..000000000 --- a/vendor/github.com/cheggaaa/pb/pb.go +++ /dev/null @@ -1,469 +0,0 @@ -// Simple console progress bars -package pb - -import ( - "fmt" - "io" - "math" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" -) - -// Current version -const Version = "1.0.19" - -const ( - // Default refresh rate - 200ms - DEFAULT_REFRESH_RATE = time.Millisecond * 200 - FORMAT = "[=>-]" -) - -// DEPRECATED -// variables for backward compatibility, from now do not work -// use pb.Format and pb.SetRefreshRate -var ( - DefaultRefreshRate = DEFAULT_REFRESH_RATE - BarStart, BarEnd, Empty, Current, CurrentN string -) - -// Create new progress bar object -func New(total int) *ProgressBar { - return New64(int64(total)) -} - -// Create new progress bar object using int64 as total -func New64(total int64) *ProgressBar { - pb := &ProgressBar{ - Total: total, - RefreshRate: DEFAULT_REFRESH_RATE, - ShowPercent: true, - ShowCounters: true, - ShowBar: true, - ShowTimeLeft: true, - ShowFinalTime: true, - Units: U_NO, - ManualUpdate: false, - finish: make(chan struct{}), - } - return pb.Format(FORMAT) -} - -// Create new object and start -func StartNew(total int) *ProgressBar { - return New(total).Start() -} 
- -// Callback for custom output -// For example: -// bar.Callback = func(s string) { -// mySuperPrint(s) -// } -// -type Callback func(out string) - -type ProgressBar struct { - current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) - previous int64 - - Total int64 - RefreshRate time.Duration - ShowPercent, ShowCounters bool - ShowSpeed, ShowTimeLeft, ShowBar bool - ShowFinalTime bool - Output io.Writer - Callback Callback - NotPrint bool - Units Units - Width int - ForceWidth bool - ManualUpdate bool - AutoStat bool - - // Default width for the time box. - UnitsWidth int - TimeBoxWidth int - - finishOnce sync.Once //Guards isFinish - finish chan struct{} - isFinish bool - - startTime time.Time - startValue int64 - - changeTime time.Time - - prefix, postfix string - - mu sync.Mutex - lastPrint string - - BarStart string - BarEnd string - Empty string - Current string - CurrentN string - - AlwaysUpdate bool -} - -// Start print -func (pb *ProgressBar) Start() *ProgressBar { - pb.startTime = time.Now() - pb.startValue = atomic.LoadInt64(&pb.current) - if pb.Total == 0 { - pb.ShowTimeLeft = false - pb.ShowPercent = false - pb.AutoStat = false - } - if !pb.ManualUpdate { - pb.Update() // Initial printing of the bar before running the bar refresher. 
- go pb.refresher() - } - return pb -} - -// Increment current value -func (pb *ProgressBar) Increment() int { - return pb.Add(1) -} - -// Get current value -func (pb *ProgressBar) Get() int64 { - c := atomic.LoadInt64(&pb.current) - return c -} - -// Set current value -func (pb *ProgressBar) Set(current int) *ProgressBar { - return pb.Set64(int64(current)) -} - -// Set64 sets the current value as int64 -func (pb *ProgressBar) Set64(current int64) *ProgressBar { - atomic.StoreInt64(&pb.current, current) - return pb -} - -// Add to current value -func (pb *ProgressBar) Add(add int) int { - return int(pb.Add64(int64(add))) -} - -func (pb *ProgressBar) Add64(add int64) int64 { - return atomic.AddInt64(&pb.current, add) -} - -// Set prefix string -func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { - pb.prefix = prefix - return pb -} - -// Set postfix string -func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { - pb.postfix = postfix - return pb -} - -// Set custom format for bar -// Example: bar.Format("[=>_]") -// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter -func (pb *ProgressBar) Format(format string) *ProgressBar { - var formatEntries []string - if utf8.RuneCountInString(format) == 5 { - formatEntries = strings.Split(format, "") - } else { - formatEntries = strings.Split(format, "\x00") - } - if len(formatEntries) == 5 { - pb.BarStart = formatEntries[0] - pb.BarEnd = formatEntries[4] - pb.Empty = formatEntries[3] - pb.Current = formatEntries[1] - pb.CurrentN = formatEntries[2] - } - return pb -} - -// Set bar refresh rate -func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { - pb.RefreshRate = rate - return pb -} - -// Set units -// bar.SetUnits(U_NO) - by default -// bar.SetUnits(U_BYTES) - for Mb, Kb, etc -func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { - pb.Units = units - return pb -} - -// Set max width, if width is bigger than terminal width, will be ignored -func (pb *ProgressBar) 
SetMaxWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = false - return pb -} - -// Set bar width -func (pb *ProgressBar) SetWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = true - return pb -} - -// End print -func (pb *ProgressBar) Finish() { - //Protect multiple calls - pb.finishOnce.Do(func() { - close(pb.finish) - pb.write(atomic.LoadInt64(&pb.current)) - pb.mu.Lock() - defer pb.mu.Unlock() - switch { - case pb.Output != nil: - fmt.Fprintln(pb.Output) - case !pb.NotPrint: - fmt.Println() - } - pb.isFinish = true - }) -} - -// IsFinished return boolean -func (pb *ProgressBar) IsFinished() bool { - pb.mu.Lock() - defer pb.mu.Unlock() - return pb.isFinish -} - -// End print and write string 'str' -func (pb *ProgressBar) FinishPrint(str string) { - pb.Finish() - if pb.Output != nil { - fmt.Fprintln(pb.Output, str) - } else { - fmt.Println(str) - } -} - -// implement io.Writer -func (pb *ProgressBar) Write(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// implement io.Reader -func (pb *ProgressBar) Read(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// Create new proxy reader over bar -// Takes io.Reader or io.ReadCloser -func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { - return &Reader{r, pb} -} - -func (pb *ProgressBar) write(current int64) { - width := pb.GetWidth() - - var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string - - // percents - if pb.ShowPercent { - var percent float64 - if pb.Total > 0 { - percent = float64(current) / (float64(pb.Total) / float64(100)) - } else { - percent = float64(current) / float64(100) - } - percentBox = fmt.Sprintf(" %6.02f%%", percent) - } - - // counters - if pb.ShowCounters { - current := Format(current).To(pb.Units).Width(pb.UnitsWidth) - if pb.Total > 0 { - total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) - countersBox = fmt.Sprintf(" %s / %s ", current, total) - } else { - countersBox = 
fmt.Sprintf(" %s / ? ", current) - } - } - - // time left - pb.mu.Lock() - currentFromStart := current - pb.startValue - fromStart := time.Now().Sub(pb.startTime) - lastChangeTime := pb.changeTime - fromChange := lastChangeTime.Sub(pb.startTime) - pb.mu.Unlock() - select { - case <-pb.finish: - if pb.ShowFinalTime { - var left time.Duration - left = (fromStart / time.Second) * time.Second - timeLeftBox = fmt.Sprintf(" %s", left.String()) - } - default: - if pb.ShowTimeLeft && currentFromStart > 0 { - perEntry := fromChange / time.Duration(currentFromStart) - var left time.Duration - if pb.Total > 0 { - left = time.Duration(pb.Total-currentFromStart) * perEntry - left -= time.Since(lastChangeTime) - left = (left / time.Second) * time.Second - } else { - left = time.Duration(currentFromStart) * perEntry - left = (left / time.Second) * time.Second - } - if left > 0 { - timeLeft := Format(int64(left)).To(U_DURATION).String() - timeLeftBox = fmt.Sprintf(" %s", timeLeft) - } - } - } - - if len(timeLeftBox) < pb.TimeBoxWidth { - timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox) - } - - // speed - if pb.ShowSpeed && currentFromStart > 0 { - fromStart := time.Now().Sub(pb.startTime) - speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second)) - speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() - } - - barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) - // bar - if pb.ShowBar { - size := width - barWidth - if size > 0 { - if pb.Total > 0 { - curSize := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) - emptySize := size - curSize - barBox = pb.BarStart - if emptySize < 0 { - emptySize = 0 - } - if curSize > size { - curSize = size - } - - cursorLen := escapeAwareRuneCountInString(pb.Current) - if emptySize <= 0 { - barBox += 
strings.Repeat(pb.Current, curSize/cursorLen) - } else if curSize > 0 { - cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN) - cursorRepetitions := (curSize - cursorEndLen) / cursorLen - barBox += strings.Repeat(pb.Current, cursorRepetitions) - barBox += pb.CurrentN - } - - emptyLen := escapeAwareRuneCountInString(pb.Empty) - barBox += strings.Repeat(pb.Empty, emptySize/emptyLen) - barBox += pb.BarEnd - } else { - pos := size - int(current)%int(size) - barBox = pb.BarStart - if pos-1 > 0 { - barBox += strings.Repeat(pb.Empty, pos-1) - } - barBox += pb.Current - if size-pos-1 > 0 { - barBox += strings.Repeat(pb.Empty, size-pos-1) - } - barBox += pb.BarEnd - } - } - } - - // check len - out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix - if cl := escapeAwareRuneCountInString(out); cl < width { - end = strings.Repeat(" ", width-cl) - } - - // and print! - pb.mu.Lock() - pb.lastPrint = out + end - isFinish := pb.isFinish - pb.mu.Unlock() - switch { - case isFinish: - return - case pb.Output != nil: - fmt.Fprint(pb.Output, "\r"+out+end) - case pb.Callback != nil: - pb.Callback(out + end) - case !pb.NotPrint: - fmt.Print("\r" + out + end) - } -} - -// GetTerminalWidth - returns terminal width for all platforms. 
-func GetTerminalWidth() (int, error) { - return terminalWidth() -} - -func (pb *ProgressBar) GetWidth() int { - if pb.ForceWidth { - return pb.Width - } - - width := pb.Width - termWidth, _ := terminalWidth() - if width == 0 || termWidth <= width { - width = termWidth - } - - return width -} - -// Write the current state of the progressbar -func (pb *ProgressBar) Update() { - c := atomic.LoadInt64(&pb.current) - p := atomic.LoadInt64(&pb.previous) - if p != c { - pb.mu.Lock() - pb.changeTime = time.Now() - pb.mu.Unlock() - atomic.StoreInt64(&pb.previous, c) - } - pb.write(c) - if pb.AutoStat { - if c == 0 { - pb.startTime = time.Now() - pb.startValue = 0 - } else if c >= pb.Total && pb.isFinish != true { - pb.Finish() - } - } -} - -// String return the last bar print -func (pb *ProgressBar) String() string { - pb.mu.Lock() - defer pb.mu.Unlock() - return pb.lastPrint -} - -// Internal loop for refreshing the progressbar -func (pb *ProgressBar) refresher() { - for { - select { - case <-pb.finish: - return - case <-time.After(pb.RefreshRate): - pb.Update() - } - } -} diff --git a/vendor/github.com/cheggaaa/pb/pb_win.go b/vendor/github.com/cheggaaa/pb/pb_win.go deleted file mode 100644 index 72f682835..000000000 --- a/vendor/github.com/cheggaaa/pb/pb_win.go +++ /dev/null @@ -1,141 +0,0 @@ -// +build windows - -package pb - -import ( - "errors" - "fmt" - "os" - "sync" - "syscall" - "unsafe" -) - -var tty = os.Stdin - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - - // GetConsoleScreenBufferInfo retrieves information about the - // specified console screen buffer. - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - - // GetConsoleMode retrieves the current input mode of a console's - // input buffer or the current output mode of a console screen buffer. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - getConsoleMode = kernel32.NewProc("GetConsoleMode") - - // SetConsoleMode sets the input mode of a console's input buffer - // or the output mode of a console screen buffer. - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - setConsoleMode = kernel32.NewProc("SetConsoleMode") - - // SetConsoleCursorPosition sets the cursor position in the - // specified console screen buffer. - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx - setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") -) - -type ( - // Defines the coordinates of the upper left and lower right corners - // of a rectangle. - // See - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx - smallRect struct { - Left, Top, Right, Bottom int16 - } - - // Defines the coordinates of a character cell in a console screen - // buffer. The origin of the coordinate system (0,0) is at the top, left cell - // of the buffer. - // See - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx - coordinates struct { - X, Y int16 - } - - word int16 - - // Contains information about a console screen buffer. - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx - consoleScreenBufferInfo struct { - dwSize coordinates - dwCursorPosition coordinates - wAttributes word - srWindow smallRect - dwMaximumWindowSize coordinates - } -) - -// terminalWidth returns width of the terminal. 
-func terminalWidth() (width int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, error(e) - } - return int(info.dwSize.X) - 1, nil -} - -func getCursorPos() (pos coordinates, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return info.dwCursorPosition, error(e) - } - return info.dwCursorPosition, nil -} - -func setCursorPos(pos coordinates) error { - _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0) - if e != 0 { - return error(e) - } - return nil -} - -var ErrPoolWasStarted = errors.New("Bar pool was started") - -var echoLocked bool -var echoLockMutex sync.Mutex - -var oldState word - -func lockEcho() (quit chan int, err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if echoLocked { - err = ErrPoolWasStarted - return - } - echoLocked = true - - if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 { - err = fmt.Errorf("Can't get terminal settings: %v", e) - return - } - - newState := oldState - const ENABLE_ECHO_INPUT = 0x0004 - const ENABLE_LINE_INPUT = 0x0002 - newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) - if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings: %v", e) - return - } - return -} - -func unlockEcho() (err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if !echoLocked { - return - } - echoLocked = false - if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 { - err 
= fmt.Errorf("Can't set terminal settings") - } - return -} diff --git a/vendor/github.com/cheggaaa/pb/pb_x.go b/vendor/github.com/cheggaaa/pb/pb_x.go deleted file mode 100644 index bbbe7c2d6..000000000 --- a/vendor/github.com/cheggaaa/pb/pb_x.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly -// +build !appengine - -package pb - -import ( - "errors" - "fmt" - "os" - "os/signal" - "sync" - "syscall" - - "golang.org/x/sys/unix" -) - -var ErrPoolWasStarted = errors.New("Bar pool was started") - -var ( - echoLockMutex sync.Mutex - origTermStatePtr *unix.Termios - tty *os.File -) - -func init() { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - - var err error - tty, err = os.Open("/dev/tty") - if err != nil { - tty = os.Stdin - } -} - -// terminalWidth returns width of the terminal. -func terminalWidth() (int, error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - - fd := int(tty.Fd()) - - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, err - } - - return int(ws.Col), nil -} - -func lockEcho() (quit chan int, err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if origTermStatePtr != nil { - return quit, ErrPoolWasStarted - } - - fd := int(tty.Fd()) - - oldTermStatePtr, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, fmt.Errorf("Can't get terminal settings: %v", err) - } - - oldTermios := *oldTermStatePtr - newTermios := oldTermios - newTermios.Lflag &^= syscall.ECHO - newTermios.Lflag |= syscall.ICANON | syscall.ISIG - newTermios.Iflag |= syscall.ICRNL - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil { - return nil, fmt.Errorf("Can't set terminal settings: %v", err) - } - - quit = make(chan int, 1) - go catchTerminate(quit) - return -} - -func unlockEcho() error { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if origTermStatePtr == nil { - return nil - } - - fd := 
int(tty.Fd()) - - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil { - return fmt.Errorf("Can't set terminal settings: %v", err) - } - - origTermStatePtr = nil - - return nil -} - -// listen exit signals and restore terminal state -func catchTerminate(quit chan int) { - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) - defer signal.Stop(sig) - select { - case <-quit: - unlockEcho() - case <-sig: - unlockEcho() - } -} diff --git a/vendor/github.com/cheggaaa/pb/pool.go b/vendor/github.com/cheggaaa/pb/pool.go deleted file mode 100644 index bc1a13886..000000000 --- a/vendor/github.com/cheggaaa/pb/pool.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows - -package pb - -import ( - "io" - "sync" - "time" -) - -// Create and start new pool with given bars -// You need call pool.Stop() after work -func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { - pool = new(Pool) - if err = pool.start(); err != nil { - return - } - pool.Add(pbs...) - return -} - -type Pool struct { - Output io.Writer - RefreshRate time.Duration - bars []*ProgressBar - lastBarsCount int - quit chan int - m sync.Mutex - finishOnce sync.Once -} - -// Add progress bars. 
-func (p *Pool) Add(pbs ...*ProgressBar) { - p.m.Lock() - defer p.m.Unlock() - for _, bar := range pbs { - bar.ManualUpdate = true - bar.NotPrint = true - bar.Start() - p.bars = append(p.bars, bar) - } -} - -func (p *Pool) start() (err error) { - p.RefreshRate = DefaultRefreshRate - quit, err := lockEcho() - if err != nil { - return - } - p.quit = make(chan int) - go p.writer(quit) - return -} - -func (p *Pool) writer(finish chan int) { - var first = true - for { - select { - case <-time.After(p.RefreshRate): - if p.print(first) { - p.print(false) - finish <- 1 - return - } - first = false - case <-p.quit: - finish <- 1 - return - } - } -} - -// Restore terminal state and close pool -func (p *Pool) Stop() error { - // Wait until one final refresh has passed. - time.Sleep(p.RefreshRate) - - p.finishOnce.Do(func() { - close(p.quit) - }) - return unlockEcho() -} diff --git a/vendor/github.com/cheggaaa/pb/pool_win.go b/vendor/github.com/cheggaaa/pb/pool_win.go deleted file mode 100644 index 63598d378..000000000 --- a/vendor/github.com/cheggaaa/pb/pool_win.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build windows - -package pb - -import ( - "fmt" - "log" -) - -func (p *Pool) print(first bool) bool { - p.m.Lock() - defer p.m.Unlock() - var out string - if !first { - coords, err := getCursorPos() - if err != nil { - log.Panic(err) - } - coords.Y -= int16(p.lastBarsCount) - if coords.Y < 0 { - coords.Y = 0 - } - coords.X = 0 - - err = setCursorPos(coords) - if err != nil { - log.Panic(err) - } - } - isFinished := true - for _, bar := range p.bars { - if !bar.IsFinished() { - isFinished = false - } - bar.Update() - out += fmt.Sprintf("\r%s\n", bar.String()) - } - if p.Output != nil { - fmt.Fprint(p.Output, out) - } else { - fmt.Print(out) - } - p.lastBarsCount = len(p.bars) - return isFinished -} diff --git a/vendor/github.com/cheggaaa/pb/pool_x.go b/vendor/github.com/cheggaaa/pb/pool_x.go deleted file mode 100644 index a8ae14d2f..000000000 --- 
a/vendor/github.com/cheggaaa/pb/pool_x.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly - -package pb - -import "fmt" - -func (p *Pool) print(first bool) bool { - p.m.Lock() - defer p.m.Unlock() - var out string - if !first { - out = fmt.Sprintf("\033[%dA", p.lastBarsCount) - } - isFinished := true - for _, bar := range p.bars { - if !bar.IsFinished() { - isFinished = false - } - bar.Update() - out += fmt.Sprintf("\r%s\n", bar.String()) - } - if p.Output != nil { - fmt.Fprint(p.Output, out) - } else { - fmt.Print(out) - } - p.lastBarsCount = len(p.bars) - return isFinished -} diff --git a/vendor/github.com/cheggaaa/pb/reader.go b/vendor/github.com/cheggaaa/pb/reader.go deleted file mode 100644 index 9f3148b54..000000000 --- a/vendor/github.com/cheggaaa/pb/reader.go +++ /dev/null @@ -1,25 +0,0 @@ -package pb - -import ( - "io" -) - -// It's proxy reader, implement io.Reader -type Reader struct { - io.Reader - bar *ProgressBar -} - -func (r *Reader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - r.bar.Add(n) - return -} - -// Close the reader when it implements io.Closer -func (r *Reader) Close() (err error) { - if closer, ok := r.Reader.(io.Closer); ok { - return closer.Close() - } - return -} diff --git a/vendor/github.com/cheggaaa/pb/runecount.go b/vendor/github.com/cheggaaa/pb/runecount.go deleted file mode 100644 index c617c55ec..000000000 --- a/vendor/github.com/cheggaaa/pb/runecount.go +++ /dev/null @@ -1,17 +0,0 @@ -package pb - -import ( - "github.com/mattn/go-runewidth" - "regexp" -) - -// Finds the control character sequences (like colors) -var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") - -func escapeAwareRuneCountInString(s string) int { - n := runewidth.StringWidth(s) - for _, sm := range ctrlFinder.FindAllString(s, -1) { - n -= runewidth.StringWidth(sm) - } - return n -} diff --git a/vendor/github.com/cheggaaa/pb/termios_bsd.go b/vendor/github.com/cheggaaa/pb/termios_bsd.go 
deleted file mode 100644 index 517ea8ed7..000000000 --- a/vendor/github.com/cheggaaa/pb/termios_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd netbsd openbsd dragonfly -// +build !appengine - -package pb - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA -const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/github.com/cheggaaa/pb/termios_sysv.go b/vendor/github.com/cheggaaa/pb/termios_sysv.go deleted file mode 100644 index b10f61859..000000000 --- a/vendor/github.com/cheggaaa/pb/termios_sysv.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux solaris -// +build !appengine - -package pb - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/github.com/jlaffaye/ftp/LICENSE b/vendor/github.com/jlaffaye/ftp/LICENSE deleted file mode 100644 index 9ab085c51..000000000 --- a/vendor/github.com/jlaffaye/ftp/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2011-2013, Julien Laffaye - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/jlaffaye/ftp/README.md b/vendor/github.com/jlaffaye/ftp/README.md deleted file mode 100644 index b711be7ad..000000000 --- a/vendor/github.com/jlaffaye/ftp/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# goftp # - -[![Build Status](https://travis-ci.org/jlaffaye/ftp.svg?branch=master)](https://travis-ci.org/jlaffaye/ftp) -[![Coverage Status](https://coveralls.io/repos/jlaffaye/ftp/badge.svg?branch=master&service=github)](https://coveralls.io/github/jlaffaye/ftp?branch=master) -[![Go ReportCard](http://goreportcard.com/badge/jlaffaye/ftp)](http://goreportcard.com/report/jlaffaye/ftp) - -A FTP client package for Go - -## Install ## - -``` -go get -u github.com/jlaffaye/ftp -``` - -## Documentation ## - -http://godoc.org/github.com/jlaffaye/ftp diff --git a/vendor/github.com/jlaffaye/ftp/ftp.go b/vendor/github.com/jlaffaye/ftp/ftp.go deleted file mode 100644 index db7ac9277..000000000 --- a/vendor/github.com/jlaffaye/ftp/ftp.go +++ /dev/null @@ -1,671 +0,0 @@ -// Package ftp implements a FTP client as described in RFC 959. -package ftp - -import ( - "bufio" - "errors" - "io" - "net" - "net/textproto" - "strconv" - "strings" - "time" -) - -// EntryType describes the different types of an Entry. -type EntryType int - -// The differents types of an Entry -const ( - EntryTypeFile EntryType = iota - EntryTypeFolder - EntryTypeLink -) - -// ServerConn represents the connection to a remote FTP server. -type ServerConn struct { - conn *textproto.Conn - host string - timeout time.Duration - features map[string]string -} - -// Entry describes a file and is returned by List(). 
-type Entry struct { - Name string - Type EntryType - Size uint64 - Time time.Time -} - -// response represent a data-connection -type response struct { - conn net.Conn - c *ServerConn -} - -// Connect is an alias to Dial, for backward compatibility -func Connect(addr string) (*ServerConn, error) { - return Dial(addr) -} - -// Dial is like DialTimeout with no timeout -func Dial(addr string) (*ServerConn, error) { - return DialTimeout(addr, 0) -} - -// DialTimeout initializes the connection to the specified ftp server address. -// -// It is generally followed by a call to Login() as most FTP commands require -// an authenticated user. -func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) { - tconn, err := net.DialTimeout("tcp", addr, timeout) - if err != nil { - return nil, err - } - - // Use the resolved IP address in case addr contains a domain name - // If we use the domain name, we might not resolve to the same IP. - remoteAddr := tconn.RemoteAddr().String() - host, _, err := net.SplitHostPort(remoteAddr) - if err != nil { - return nil, err - } - - conn := textproto.NewConn(tconn) - - c := &ServerConn{ - conn: conn, - host: host, - timeout: timeout, - features: make(map[string]string), - } - - _, _, err = c.conn.ReadResponse(StatusReady) - if err != nil { - c.Quit() - return nil, err - } - - err = c.feat() - if err != nil { - c.Quit() - return nil, err - } - - return c, nil -} - -// Login authenticates the client with specified user and password. -// -// "anonymous"/"anonymous" is a common user/password scheme for FTP servers -// that allows anonymous read-only accounts. 
-func (c *ServerConn) Login(user, password string) error { - code, message, err := c.cmd(-1, "USER %s", user) - if err != nil { - return err - } - - switch code { - case StatusLoggedIn: - case StatusUserOK: - _, _, err = c.cmd(StatusLoggedIn, "PASS %s", password) - if err != nil { - return err - } - default: - return errors.New(message) - } - - // Switch to binary mode - _, _, err = c.cmd(StatusCommandOK, "TYPE I") - if err != nil { - return err - } - - return nil -} - -// feat issues a FEAT FTP command to list the additional commands supported by -// the remote FTP server. -// FEAT is described in RFC 2389 -func (c *ServerConn) feat() error { - code, message, err := c.cmd(-1, "FEAT") - if err != nil { - return err - } - - if code != StatusSystem { - // The server does not support the FEAT command. This is not an - // error: we consider that there is no additional feature. - return nil - } - - lines := strings.Split(message, "\n") - for _, line := range lines { - if !strings.HasPrefix(line, " ") { - continue - } - - line = strings.TrimSpace(line) - featureElements := strings.SplitN(line, " ", 2) - - command := featureElements[0] - - var commandDesc string - if len(featureElements) == 2 { - commandDesc = featureElements[1] - } - - c.features[command] = commandDesc - } - - return nil -} - -// epsv issues an "EPSV" command to get a port number for a data connection. -func (c *ServerConn) epsv() (port int, err error) { - _, line, err := c.cmd(StatusExtendedPassiveMode, "EPSV") - if err != nil { - return - } - - start := strings.Index(line, "|||") - end := strings.LastIndex(line, "|") - if start == -1 || end == -1 { - err = errors.New("Invalid EPSV response format") - return - } - port, err = strconv.Atoi(line[start+3 : end]) - return -} - -// pasv issues a "PASV" command to get a port number for a data connection. 
-func (c *ServerConn) pasv() (port int, err error) { - _, line, err := c.cmd(StatusPassiveMode, "PASV") - if err != nil { - return - } - - // PASV response format : 227 Entering Passive Mode (h1,h2,h3,h4,p1,p2). - start := strings.Index(line, "(") - end := strings.LastIndex(line, ")") - if start == -1 || end == -1 { - return 0, errors.New("Invalid PASV response format") - } - - // We have to split the response string - pasvData := strings.Split(line[start+1:end], ",") - - if len(pasvData) < 6 { - return 0, errors.New("Invalid PASV response format") - } - - // Let's compute the port number - portPart1, err1 := strconv.Atoi(pasvData[4]) - if err1 != nil { - err = err1 - return - } - - portPart2, err2 := strconv.Atoi(pasvData[5]) - if err2 != nil { - err = err2 - return - } - - // Recompose port - port = portPart1*256 + portPart2 - return -} - -// openDataConn creates a new FTP data connection. -func (c *ServerConn) openDataConn() (net.Conn, error) { - var ( - port int - err error - ) - - if port, err = c.epsv(); err != nil { - if port, err = c.pasv(); err != nil { - return nil, err - } - } - - return net.DialTimeout("tcp", net.JoinHostPort(c.host, strconv.Itoa(port)), c.timeout) -} - -// cmd is a helper function to execute a command and check for the expected FTP -// return code -func (c *ServerConn) cmd(expected int, format string, args ...interface{}) (int, string, error) { - _, err := c.conn.Cmd(format, args...) - if err != nil { - return 0, "", err - } - - return c.conn.ReadResponse(expected) -} - -// cmdDataConnFrom executes a command which require a FTP data connection. -// Issues a REST FTP command to specify the number of bytes to skip for the transfer. 
-func (c *ServerConn) cmdDataConnFrom(offset uint64, format string, args ...interface{}) (net.Conn, error) { - conn, err := c.openDataConn() - if err != nil { - return nil, err - } - - if offset != 0 { - _, _, err := c.cmd(StatusRequestFilePending, "REST %d", offset) - if err != nil { - return nil, err - } - } - - _, err = c.conn.Cmd(format, args...) - if err != nil { - conn.Close() - return nil, err - } - - code, msg, err := c.conn.ReadResponse(-1) - if err != nil { - conn.Close() - return nil, err - } - if code != StatusAlreadyOpen && code != StatusAboutToSend { - conn.Close() - return nil, &textproto.Error{Code: code, Msg: msg} - } - - return conn, nil -} - -var errUnsupportedListLine = errors.New("Unsupported LIST line") - -// parseRFC3659ListLine parses the style of directory line defined in RFC 3659. -func parseRFC3659ListLine(line string) (*Entry, error) { - iSemicolon := strings.Index(line, ";") - iWhitespace := strings.Index(line, " ") - - if iSemicolon < 0 || iSemicolon > iWhitespace { - return nil, errUnsupportedListLine - } - - e := &Entry{ - Name: line[iWhitespace+1:], - } - - for _, field := range strings.Split(line[:iWhitespace-1], ";") { - i := strings.Index(field, "=") - if i < 1 { - return nil, errUnsupportedListLine - } - - key := field[:i] - value := field[i+1:] - - switch key { - case "modify": - var err error - e.Time, err = time.Parse("20060102150405", value) - if err != nil { - return nil, err - } - case "type": - switch value { - case "dir", "cdir", "pdir": - e.Type = EntryTypeFolder - case "file": - e.Type = EntryTypeFile - } - case "size": - e.setSize(value) - } - } - return e, nil -} - -// parseLsListLine parses a directory line in a format based on the output of -// the UNIX ls command. 
-func parseLsListLine(line string) (*Entry, error) { - fields := strings.Fields(line) - if len(fields) >= 7 && fields[1] == "folder" && fields[2] == "0" { - e := &Entry{ - Type: EntryTypeFolder, - Name: strings.Join(fields[6:], " "), - } - if err := e.setTime(fields[3:6]); err != nil { - return nil, err - } - - return e, nil - } - - if len(fields) < 8 { - return nil, errUnsupportedListLine - } - - if fields[1] == "0" { - e := &Entry{ - Type: EntryTypeFile, - Name: strings.Join(fields[7:], " "), - } - - if err := e.setSize(fields[2]); err != nil { - return nil, err - } - if err := e.setTime(fields[4:7]); err != nil { - return nil, err - } - - return e, nil - } - - if len(fields) < 9 { - return nil, errUnsupportedListLine - } - - e := &Entry{} - switch fields[0][0] { - case '-': - e.Type = EntryTypeFile - if err := e.setSize(fields[4]); err != nil { - return nil, err - } - case 'd': - e.Type = EntryTypeFolder - case 'l': - e.Type = EntryTypeLink - default: - return nil, errors.New("Unknown entry type") - } - - if err := e.setTime(fields[5:8]); err != nil { - return nil, err - } - - e.Name = strings.Join(fields[8:], " ") - return e, nil -} - -var dirTimeFormats = []string{ - "01-02-06 03:04PM", - "2006-01-02 15:04", -} - -// parseDirListLine parses a directory line in a format based on the output of -// the MS-DOS DIR command. -func parseDirListLine(line string) (*Entry, error) { - e := &Entry{} - var err error - - // Try various time formats that DIR might use, and stop when one works. - for _, format := range dirTimeFormats { - if len(line) > len(format) { - e.Time, err = time.Parse(format, line[:len(format)]) - if err == nil { - line = line[len(format):] - break - } - } - } - if err != nil { - // None of the time formats worked. 
- return nil, errUnsupportedListLine - } - - line = strings.TrimLeft(line, " ") - if strings.HasPrefix(line, "") { - e.Type = EntryTypeFolder - line = strings.TrimPrefix(line, "") - } else { - space := strings.Index(line, " ") - if space == -1 { - return nil, errUnsupportedListLine - } - e.Size, err = strconv.ParseUint(line[:space], 10, 64) - if err != nil { - return nil, errUnsupportedListLine - } - e.Type = EntryTypeFile - line = line[space:] - } - - e.Name = strings.TrimLeft(line, " ") - return e, nil -} - -var listLineParsers = []func(line string) (*Entry, error){ - parseRFC3659ListLine, - parseLsListLine, - parseDirListLine, -} - -// parseListLine parses the various non-standard format returned by the LIST -// FTP command. -func parseListLine(line string) (*Entry, error) { - for _, f := range listLineParsers { - e, err := f(line) - if err == errUnsupportedListLine { - // Try another format. - continue - } - return e, err - } - return nil, errUnsupportedListLine -} - -func (e *Entry) setSize(str string) (err error) { - e.Size, err = strconv.ParseUint(str, 0, 64) - return -} - -func (e *Entry) setTime(fields []string) (err error) { - var timeStr string - if strings.Contains(fields[2], ":") { // this year - thisYear, _, _ := time.Now().Date() - timeStr = fields[1] + " " + fields[0] + " " + strconv.Itoa(thisYear)[2:4] + " " + fields[2] + " GMT" - } else { // not this year - if len(fields[2]) != 4 { - return errors.New("Invalid year format in time string") - } - timeStr = fields[1] + " " + fields[0] + " " + fields[2][2:4] + " 00:00 GMT" - } - e.Time, err = time.Parse("_2 Jan 06 15:04 MST", timeStr) - return -} - -// NameList issues an NLST FTP command. 
-func (c *ServerConn) NameList(path string) (entries []string, err error) { - conn, err := c.cmdDataConnFrom(0, "NLST %s", path) - if err != nil { - return - } - - r := &response{conn, c} - defer r.Close() - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - entries = append(entries, scanner.Text()) - } - if err = scanner.Err(); err != nil { - return entries, err - } - return -} - -// List issues a LIST FTP command. -func (c *ServerConn) List(path string) (entries []*Entry, err error) { - conn, err := c.cmdDataConnFrom(0, "LIST %s", path) - if err != nil { - return - } - - r := &response{conn, c} - defer r.Close() - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - entry, err := parseListLine(line) - if err == nil { - entries = append(entries, entry) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return -} - -// ChangeDir issues a CWD FTP command, which changes the current directory to -// the specified path. -func (c *ServerConn) ChangeDir(path string) error { - _, _, err := c.cmd(StatusRequestedFileActionOK, "CWD %s", path) - return err -} - -// ChangeDirToParent issues a CDUP FTP command, which changes the current -// directory to the parent directory. This is similar to a call to ChangeDir -// with a path set to "..". -func (c *ServerConn) ChangeDirToParent() error { - _, _, err := c.cmd(StatusRequestedFileActionOK, "CDUP") - return err -} - -// CurrentDir issues a PWD FTP command, which Returns the path of the current -// directory. -func (c *ServerConn) CurrentDir() (string, error) { - _, msg, err := c.cmd(StatusPathCreated, "PWD") - if err != nil { - return "", err - } - - start := strings.Index(msg, "\"") - end := strings.LastIndex(msg, "\"") - - if start == -1 || end == -1 { - return "", errors.New("Unsuported PWD response format") - } - - return msg[start+1 : end], nil -} - -// Retr issues a RETR FTP command to fetch the specified file from the remote -// FTP server. 
-// -// The returned ReadCloser must be closed to cleanup the FTP data connection. -func (c *ServerConn) Retr(path string) (io.ReadCloser, error) { - return c.RetrFrom(path, 0) -} - -// RetrFrom issues a RETR FTP command to fetch the specified file from the remote -// FTP server, the server will not send the offset first bytes of the file. -// -// The returned ReadCloser must be closed to cleanup the FTP data connection. -func (c *ServerConn) RetrFrom(path string, offset uint64) (io.ReadCloser, error) { - conn, err := c.cmdDataConnFrom(offset, "RETR %s", path) - if err != nil { - return nil, err - } - - return &response{conn, c}, nil -} - -// Stor issues a STOR FTP command to store a file to the remote FTP server. -// Stor creates the specified file with the content of the io.Reader. -// -// Hint: io.Pipe() can be used if an io.Writer is required. -func (c *ServerConn) Stor(path string, r io.Reader) error { - return c.StorFrom(path, r, 0) -} - -// StorFrom issues a STOR FTP command to store a file to the remote FTP server. -// Stor creates the specified file with the content of the io.Reader, writing -// on the server will start at the given file offset. -// -// Hint: io.Pipe() can be used if an io.Writer is required. -func (c *ServerConn) StorFrom(path string, r io.Reader, offset uint64) error { - conn, err := c.cmdDataConnFrom(offset, "STOR %s", path) - if err != nil { - return err - } - - _, err = io.Copy(conn, r) - conn.Close() - if err != nil { - return err - } - - _, _, err = c.conn.ReadResponse(StatusClosingDataConnection) - return err -} - -// Rename renames a file on the remote FTP server. -func (c *ServerConn) Rename(from, to string) error { - _, _, err := c.cmd(StatusRequestFilePending, "RNFR %s", from) - if err != nil { - return err - } - - _, _, err = c.cmd(StatusRequestedFileActionOK, "RNTO %s", to) - return err -} - -// Delete issues a DELE FTP command to delete the specified file from the -// remote FTP server. 
-func (c *ServerConn) Delete(path string) error { - _, _, err := c.cmd(StatusRequestedFileActionOK, "DELE %s", path) - return err -} - -// MakeDir issues a MKD FTP command to create the specified directory on the -// remote FTP server. -func (c *ServerConn) MakeDir(path string) error { - _, _, err := c.cmd(StatusPathCreated, "MKD %s", path) - return err -} - -// RemoveDir issues a RMD FTP command to remove the specified directory from -// the remote FTP server. -func (c *ServerConn) RemoveDir(path string) error { - _, _, err := c.cmd(StatusRequestedFileActionOK, "RMD %s", path) - return err -} - -// NoOp issues a NOOP FTP command. -// NOOP has no effects and is usually used to prevent the remote FTP server to -// close the otherwise idle connection. -func (c *ServerConn) NoOp() error { - _, _, err := c.cmd(StatusCommandOK, "NOOP") - return err -} - -// Logout issues a REIN FTP command to logout the current user. -func (c *ServerConn) Logout() error { - _, _, err := c.cmd(StatusReady, "REIN") - return err -} - -// Quit issues a QUIT FTP command to properly close the connection from the -// remote FTP server. -func (c *ServerConn) Quit() error { - c.conn.Cmd("QUIT") - return c.conn.Close() -} - -// Read implements the io.Reader interface on a FTP data connection. -func (r *response) Read(buf []byte) (int, error) { - return r.conn.Read(buf) -} - -// Close implements the io.Closer interface on a FTP data connection. 
-func (r *response) Close() error { - err := r.conn.Close() - _, _, err2 := r.c.conn.ReadResponse(StatusClosingDataConnection) - if err2 != nil { - err = err2 - } - return err -} diff --git a/vendor/github.com/jlaffaye/ftp/status.go b/vendor/github.com/jlaffaye/ftp/status.go deleted file mode 100644 index e90ca6211..000000000 --- a/vendor/github.com/jlaffaye/ftp/status.go +++ /dev/null @@ -1,106 +0,0 @@ -package ftp - -// FTP status codes, defined in RFC 959 -const ( - StatusInitiating = 100 - StatusRestartMarker = 110 - StatusReadyMinute = 120 - StatusAlreadyOpen = 125 - StatusAboutToSend = 150 - - StatusCommandOK = 200 - StatusCommandNotImplemented = 202 - StatusSystem = 211 - StatusDirectory = 212 - StatusFile = 213 - StatusHelp = 214 - StatusName = 215 - StatusReady = 220 - StatusClosing = 221 - StatusDataConnectionOpen = 225 - StatusClosingDataConnection = 226 - StatusPassiveMode = 227 - StatusLongPassiveMode = 228 - StatusExtendedPassiveMode = 229 - StatusLoggedIn = 230 - StatusLoggedOut = 231 - StatusLogoutAck = 232 - StatusRequestedFileActionOK = 250 - StatusPathCreated = 257 - - StatusUserOK = 331 - StatusLoginNeedAccount = 332 - StatusRequestFilePending = 350 - - StatusNotAvailable = 421 - StatusCanNotOpenDataConnection = 425 - StatusTransfertAborted = 426 - StatusInvalidCredentials = 430 - StatusHostUnavailable = 434 - StatusFileActionIgnored = 450 - StatusActionAborted = 451 - Status452 = 452 - - StatusBadCommand = 500 - StatusBadArguments = 501 - StatusNotImplemented = 502 - StatusBadSequence = 503 - StatusNotImplementedParameter = 504 - StatusNotLoggedIn = 530 - StatusStorNeedAccount = 532 - StatusFileUnavailable = 550 - StatusPageTypeUnknown = 551 - StatusExceededStorage = 552 - StatusBadFileName = 553 -) - -var statusText = map[int]string{ - // 200 - StatusCommandOK: "Command okay.", - StatusCommandNotImplemented: "Command not implemented, superfluous at this site.", - StatusSystem: "System status, or system help reply.", - StatusDirectory: 
"Directory status.", - StatusFile: "File status.", - StatusHelp: "Help message.", - StatusName: "", - StatusReady: "Service ready for new user.", - StatusClosing: "Service closing control connection.", - StatusDataConnectionOpen: "Data connection open; no transfer in progress.", - StatusClosingDataConnection: "Closing data connection. Requested file action successful.", - StatusPassiveMode: "Entering Passive Mode.", - StatusLongPassiveMode: "Entering Long Passive Mode.", - StatusExtendedPassiveMode: "Entering Extended Passive Mode.", - StatusLoggedIn: "User logged in, proceed.", - StatusLoggedOut: "User logged out; service terminated.", - StatusLogoutAck: "Logout command noted, will complete when transfer done.", - StatusRequestedFileActionOK: "Requested file action okay, completed.", - StatusPathCreated: "Path created.", - - // 300 - StatusUserOK: "User name okay, need password.", - StatusLoginNeedAccount: "Need account for login.", - StatusRequestFilePending: "Requested file action pending further information.", - - // 400 - StatusNotAvailable: "Service not available, closing control connection.", - StatusCanNotOpenDataConnection: "Can't open data connection.", - StatusTransfertAborted: "Connection closed; transfer aborted.", - StatusInvalidCredentials: "Invalid username or password.", - StatusHostUnavailable: "Requested host unavailable.", - StatusFileActionIgnored: "Requested file action not taken.", - StatusActionAborted: "Requested action aborted. 
Local error in processing.", - Status452: "Insufficient storage space in system.", - - // 500 - StatusBadCommand: "Command unrecognized.", - StatusBadArguments: "Syntax error in parameters or arguments.", - StatusNotImplemented: "Command not implemented.", - StatusBadSequence: "Bad sequence of commands.", - StatusNotImplementedParameter: "Command not implemented for that parameter.", - StatusNotLoggedIn: "Not logged in.", - StatusStorNeedAccount: "Need account for storing files.", - StatusFileUnavailable: "File unavailable.", - StatusPageTypeUnknown: "Page type unknown.", - StatusExceededStorage: "Exceeded storage allocation.", - StatusBadFileName: "File name not allowed.", -} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE deleted file mode 100644 index 91b5cef30..000000000 --- a/vendor/github.com/mattn/go-runewidth/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd deleted file mode 100644 index 66663a94b..000000000 --- a/vendor/github.com/mattn/go-runewidth/README.mkd +++ /dev/null @@ -1,27 +0,0 @@ -go-runewidth -============ - -[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) -[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) -[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) -[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) - -Provides functions to get fixed width of the character or string. 
- -Usage ------ - -```go -runewidth.StringWidth("つのだ☆HIRO") == 12 -``` - - -Author ------- - -Yasuhiro Matsumoto - -License -------- - -under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go deleted file mode 100644 index 2164497ad..000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ /dev/null @@ -1,1223 +0,0 @@ -package runewidth - -var ( - // EastAsianWidth will be set true if the current locale is CJK - EastAsianWidth = IsEastAsian() - - // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{EastAsianWidth} -) - -type interval struct { - first rune - last rune -} - -type table []interval - -func inTables(r rune, ts ...table) bool { - for _, t := range ts { - if inTable(r, t) { - return true - } - } - return false -} - -func inTable(r rune, t table) bool { - // func (t table) IncludesRune(r rune) bool { - if r < t[0].first { - return false - } - - bot := 0 - top := len(t) - 1 - for top >= bot { - mid := (bot + top) / 2 - - switch { - case t[mid].last < r: - bot = mid + 1 - case t[mid].first > r: - top = mid - 1 - default: - return true - } - } - - return false -} - -var private = table{ - {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, -} - -var nonprint = table{ - {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, - {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, - {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, -} - -var combining = table{ - {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, - {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, - {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, - {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, - {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, - {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, - {0x0816, 0x0819}, {0x081B, 0x0823}, 
{0x0825, 0x0827}, - {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, - {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, - {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, - {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, - {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, - {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, - {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, - {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, - {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, - {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, - {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, - {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, - {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, - {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, - {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, - {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, - {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, - {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, - {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, - {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, - {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, - {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, - {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, - {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, - {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, - {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, - {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, - {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, - {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, - {0x1A55, 0x1A5E}, 
{0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, - {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, - {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, - {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, - {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, - {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, - {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, - {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, - {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, - {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, - {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, - {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, - {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, - {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, - {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, - {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, - {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, - {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, - {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, - {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, - {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, - {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, - {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, - {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, - {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, - {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, - {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, - {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, - {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, - {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, - {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, - {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, - {0x16F8F, 0x16F92}, 
{0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, - {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, - {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, - {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, - {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, - {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, - {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, - {0xE0100, 0xE01EF}, -} - -var doublewidth = table{ - {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, - {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, - {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, - {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, - {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, - {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, - {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, - {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, - {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, - {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, - {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, - {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, - {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA}, - {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, - {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, - {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, - {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, - {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, - {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, - {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, - {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, - {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, - {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, - {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, - {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, 
{0x1F3F4, 0x1F3F4}, - {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, - {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, - {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, - {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, - {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, - {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, - {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, - {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, - {0x30000, 0x3FFFD}, -} - -var ambiguous = table{ - {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, - {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, - {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, - {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, - {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, - {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, - {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, - {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, - {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, - {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, - {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, - {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, - {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, - {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, - {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, - {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, - {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, - {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, - {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, - {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, - {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, - {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, - {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, - {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, - {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, - {0x20AC, 0x20AC}, {0x2103, 
0x2103}, {0x2105, 0x2105}, - {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, - {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, - {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, - {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, - {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, - {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, - {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, - {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, - {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, - {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, - {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, - {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, - {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, - {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, - {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, - {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, - {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, - {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, - {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, - {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, - {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, - {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, - {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, - {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, - {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, - {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, - {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, - {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, - {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, - {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, - {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, - {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, - {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, - {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, -} - -var emoji = table{ - {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F321}, 
{0x1F324, 0x1F32C}, - {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, {0x1F396, 0x1F397}, - {0x1F399, 0x1F39B}, {0x1F39E, 0x1F39F}, {0x1F3CB, 0x1F3CE}, - {0x1F3D4, 0x1F3DF}, {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3F7}, - {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FD}, - {0x1F549, 0x1F54A}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F579}, - {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, - {0x1F5A5, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, - {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, - {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, - {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, - {0x1F5FA, 0x1F5FA}, {0x1F6CB, 0x1F6CF}, {0x1F6E0, 0x1F6E5}, - {0x1F6E9, 0x1F6E9}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F3}, -} - -var notassigned = table{ - {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, - {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, - {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, - {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, - {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, - {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF}, - {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F}, - {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, - {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, - {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, - {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, - {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, - {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, - {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, - {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, - {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, - {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, - {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, - {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, - {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, - {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, - {0x0ACA, 0x0ACA}, {0x0ACE, 
0x0ACF}, {0x0AD1, 0x0ADF}, - {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, - {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, - {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, - {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, - {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, - {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, - {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98}, - {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, - {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, - {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, - {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, - {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, - {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, - {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, - {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, - {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, - {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, - {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, - {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, - {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, - {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, - {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, - {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, - {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, - {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, - {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, - {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, - {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, - {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, - {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, - {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, - {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, - {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, - {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, - {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, - {0x0FDB, 
0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, - {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, - {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, - {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, - {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, - {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, - {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, - {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, - {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D}, - {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, - {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, - {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, - {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, - {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, - {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, - {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, - {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, - {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, - {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, - {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, - {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, - {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, - {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, - {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, - {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, - {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, - {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, - {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, - {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, - {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, - {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, - {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, - {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, - {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, - {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, - {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 
0x2D9F}, - {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, - {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, - {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, - {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, - {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, - {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, - {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, - {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, - {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, - {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, - {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, - {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, - {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, - {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, - {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, - {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, - {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, - {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, - {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, - {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, - {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, - {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, - {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, - {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, - {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, - {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, - {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, - {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, - {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, - {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, - {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, - {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, - {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, - {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, - {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 
0x1037F}, - {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, - {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, - {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, - {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, - {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809}, - {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, - {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, - {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, - {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, - {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, - {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, - {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, - {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, - {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, - {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, - {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, - {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, - {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, - {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, - {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, - {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, - {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, - {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, - {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, - {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, - {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, - {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, - {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, - {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, - {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, - {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, - {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, - 
{0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, - {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, - {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, - {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, - {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, - {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, - {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, - {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, - {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, - {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, - {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, - {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, - {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, - {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, - {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, - {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, - {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, - {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, - {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, - {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, - {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, - {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, - {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, - {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, - {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, - {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, - {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, - {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, - {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, - {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, - {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, - {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, - {0x1EE53, 
0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, - {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, - {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, - {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, - {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, - {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, - {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, - {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, - {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, - {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, - {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, - {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, - {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, - {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, - {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, - {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, - {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, - {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F}, - {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, - {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, - {0xFFFFE, 0xFFFFF}, -} - -var neutral = table{ - {0x0000, 0x001F}, {0x007F, 0x007F}, {0x0080, 0x009F}, - {0x00A0, 0x00A0}, {0x00A9, 0x00A9}, {0x00AB, 0x00AB}, - {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, {0x00C0, 0x00C5}, - {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, {0x00D9, 0x00DD}, - {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, {0x00EB, 0x00EB}, - {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, {0x00F4, 0x00F6}, - {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, {0x00FF, 0x00FF}, - {0x0100, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, - {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, - {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, - {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, - {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, - {0x016C, 0x017F}, {0x0180, 0x01BA}, {0x01BB, 
0x01BB}, - {0x01BC, 0x01BF}, {0x01C0, 0x01C3}, {0x01C4, 0x01CD}, - {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, {0x01D3, 0x01D3}, - {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, {0x01D9, 0x01D9}, - {0x01DB, 0x01DB}, {0x01DD, 0x024F}, {0x0250, 0x0250}, - {0x0252, 0x0260}, {0x0262, 0x0293}, {0x0294, 0x0294}, - {0x0295, 0x02AF}, {0x02B0, 0x02C1}, {0x02C2, 0x02C3}, - {0x02C5, 0x02C5}, {0x02C6, 0x02C6}, {0x02C8, 0x02C8}, - {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, {0x02D1, 0x02D1}, - {0x02D2, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, - {0x02E0, 0x02E4}, {0x02E5, 0x02EB}, {0x02EC, 0x02EC}, - {0x02ED, 0x02ED}, {0x02EE, 0x02EE}, {0x02EF, 0x02FF}, - {0x0370, 0x0373}, {0x0374, 0x0374}, {0x0375, 0x0375}, - {0x0376, 0x0377}, {0x037A, 0x037A}, {0x037B, 0x037D}, - {0x037E, 0x037E}, {0x037F, 0x037F}, {0x0384, 0x0385}, - {0x0386, 0x0386}, {0x0387, 0x0387}, {0x0388, 0x038A}, - {0x038C, 0x038C}, {0x038E, 0x0390}, {0x03AA, 0x03B0}, - {0x03C2, 0x03C2}, {0x03CA, 0x03F5}, {0x03F6, 0x03F6}, - {0x03F7, 0x03FF}, {0x0400, 0x0400}, {0x0402, 0x040F}, - {0x0450, 0x0450}, {0x0452, 0x0481}, {0x0482, 0x0482}, - {0x0483, 0x0487}, {0x0488, 0x0489}, {0x048A, 0x04FF}, - {0x0500, 0x052F}, {0x0531, 0x0556}, {0x0559, 0x0559}, - {0x055A, 0x055F}, {0x0561, 0x0587}, {0x0589, 0x0589}, - {0x058A, 0x058A}, {0x058D, 0x058E}, {0x058F, 0x058F}, - {0x0591, 0x05BD}, {0x05BE, 0x05BE}, {0x05BF, 0x05BF}, - {0x05C0, 0x05C0}, {0x05C1, 0x05C2}, {0x05C3, 0x05C3}, - {0x05C4, 0x05C5}, {0x05C6, 0x05C6}, {0x05C7, 0x05C7}, - {0x05D0, 0x05EA}, {0x05F0, 0x05F2}, {0x05F3, 0x05F4}, - {0x0600, 0x0605}, {0x0606, 0x0608}, {0x0609, 0x060A}, - {0x060B, 0x060B}, {0x060C, 0x060D}, {0x060E, 0x060F}, - {0x0610, 0x061A}, {0x061B, 0x061B}, {0x061C, 0x061C}, - {0x061E, 0x061F}, {0x0620, 0x063F}, {0x0640, 0x0640}, - {0x0641, 0x064A}, {0x064B, 0x065F}, {0x0660, 0x0669}, - {0x066A, 0x066D}, {0x066E, 0x066F}, {0x0670, 0x0670}, - {0x0671, 0x06D3}, {0x06D4, 0x06D4}, {0x06D5, 0x06D5}, - {0x06D6, 0x06DC}, {0x06DD, 0x06DD}, {0x06DE, 0x06DE}, - {0x06DF, 0x06E4}, {0x06E5, 
0x06E6}, {0x06E7, 0x06E8}, - {0x06E9, 0x06E9}, {0x06EA, 0x06ED}, {0x06EE, 0x06EF}, - {0x06F0, 0x06F9}, {0x06FA, 0x06FC}, {0x06FD, 0x06FE}, - {0x06FF, 0x06FF}, {0x0700, 0x070D}, {0x070F, 0x070F}, - {0x0710, 0x0710}, {0x0711, 0x0711}, {0x0712, 0x072F}, - {0x0730, 0x074A}, {0x074D, 0x074F}, {0x0750, 0x077F}, - {0x0780, 0x07A5}, {0x07A6, 0x07B0}, {0x07B1, 0x07B1}, - {0x07C0, 0x07C9}, {0x07CA, 0x07EA}, {0x07EB, 0x07F3}, - {0x07F4, 0x07F5}, {0x07F6, 0x07F6}, {0x07F7, 0x07F9}, - {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x0816, 0x0819}, - {0x081A, 0x081A}, {0x081B, 0x0823}, {0x0824, 0x0824}, - {0x0825, 0x0827}, {0x0828, 0x0828}, {0x0829, 0x082D}, - {0x0830, 0x083E}, {0x0840, 0x0858}, {0x0859, 0x085B}, - {0x085E, 0x085E}, {0x08A0, 0x08B4}, {0x08B6, 0x08BD}, - {0x08D4, 0x08E1}, {0x08E2, 0x08E2}, {0x08E3, 0x08FF}, - {0x0900, 0x0902}, {0x0903, 0x0903}, {0x0904, 0x0939}, - {0x093A, 0x093A}, {0x093B, 0x093B}, {0x093C, 0x093C}, - {0x093D, 0x093D}, {0x093E, 0x0940}, {0x0941, 0x0948}, - {0x0949, 0x094C}, {0x094D, 0x094D}, {0x094E, 0x094F}, - {0x0950, 0x0950}, {0x0951, 0x0957}, {0x0958, 0x0961}, - {0x0962, 0x0963}, {0x0964, 0x0965}, {0x0966, 0x096F}, - {0x0970, 0x0970}, {0x0971, 0x0971}, {0x0972, 0x097F}, - {0x0980, 0x0980}, {0x0981, 0x0981}, {0x0982, 0x0983}, - {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, - {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, - {0x09BC, 0x09BC}, {0x09BD, 0x09BD}, {0x09BE, 0x09C0}, - {0x09C1, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CC}, - {0x09CD, 0x09CD}, {0x09CE, 0x09CE}, {0x09D7, 0x09D7}, - {0x09DC, 0x09DD}, {0x09DF, 0x09E1}, {0x09E2, 0x09E3}, - {0x09E6, 0x09EF}, {0x09F0, 0x09F1}, {0x09F2, 0x09F3}, - {0x09F4, 0x09F9}, {0x09FA, 0x09FA}, {0x09FB, 0x09FB}, - {0x0A01, 0x0A02}, {0x0A03, 0x0A03}, {0x0A05, 0x0A0A}, - {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, - {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, - {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A40}, {0x0A41, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A59, 
0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A6F}, - {0x0A70, 0x0A71}, {0x0A72, 0x0A74}, {0x0A75, 0x0A75}, - {0x0A81, 0x0A82}, {0x0A83, 0x0A83}, {0x0A85, 0x0A8D}, - {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, - {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0ABC}, - {0x0ABD, 0x0ABD}, {0x0ABE, 0x0AC0}, {0x0AC1, 0x0AC5}, - {0x0AC7, 0x0AC8}, {0x0AC9, 0x0AC9}, {0x0ACB, 0x0ACC}, - {0x0ACD, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE1}, - {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AF0, 0x0AF0}, - {0x0AF1, 0x0AF1}, {0x0AF9, 0x0AF9}, {0x0B01, 0x0B01}, - {0x0B02, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, - {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, - {0x0B35, 0x0B39}, {0x0B3C, 0x0B3C}, {0x0B3D, 0x0B3D}, - {0x0B3E, 0x0B3E}, {0x0B3F, 0x0B3F}, {0x0B40, 0x0B40}, - {0x0B41, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4C}, - {0x0B4D, 0x0B4D}, {0x0B56, 0x0B56}, {0x0B57, 0x0B57}, - {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B62, 0x0B63}, - {0x0B66, 0x0B6F}, {0x0B70, 0x0B70}, {0x0B71, 0x0B71}, - {0x0B72, 0x0B77}, {0x0B82, 0x0B82}, {0x0B83, 0x0B83}, - {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, - {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, - {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, - {0x0BBE, 0x0BBF}, {0x0BC0, 0x0BC0}, {0x0BC1, 0x0BC2}, - {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCC}, {0x0BCD, 0x0BCD}, - {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, - {0x0BF0, 0x0BF2}, {0x0BF3, 0x0BF8}, {0x0BF9, 0x0BF9}, - {0x0BFA, 0x0BFA}, {0x0C00, 0x0C00}, {0x0C01, 0x0C03}, - {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, - {0x0C2A, 0x0C39}, {0x0C3D, 0x0C3D}, {0x0C3E, 0x0C40}, - {0x0C41, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C61}, - {0x0C62, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C78, 0x0C7E}, - {0x0C7F, 0x0C7F}, {0x0C80, 0x0C80}, {0x0C81, 0x0C81}, - {0x0C82, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, - {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, - {0x0CBC, 0x0CBC}, {0x0CBD, 0x0CBD}, {0x0CBE, 
0x0CBE}, - {0x0CBF, 0x0CBF}, {0x0CC0, 0x0CC4}, {0x0CC6, 0x0CC6}, - {0x0CC7, 0x0CC8}, {0x0CCA, 0x0CCB}, {0x0CCC, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE1}, - {0x0CE2, 0x0CE3}, {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, - {0x0D01, 0x0D01}, {0x0D02, 0x0D03}, {0x0D05, 0x0D0C}, - {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D}, - {0x0D3E, 0x0D40}, {0x0D41, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4C}, {0x0D4D, 0x0D4D}, {0x0D4E, 0x0D4E}, - {0x0D4F, 0x0D4F}, {0x0D54, 0x0D56}, {0x0D57, 0x0D57}, - {0x0D58, 0x0D5E}, {0x0D5F, 0x0D61}, {0x0D62, 0x0D63}, - {0x0D66, 0x0D6F}, {0x0D70, 0x0D78}, {0x0D79, 0x0D79}, - {0x0D7A, 0x0D7F}, {0x0D82, 0x0D83}, {0x0D85, 0x0D96}, - {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, - {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD1}, - {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, - {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF3}, {0x0DF4, 0x0DF4}, - {0x0E01, 0x0E30}, {0x0E31, 0x0E31}, {0x0E32, 0x0E33}, - {0x0E34, 0x0E3A}, {0x0E3F, 0x0E3F}, {0x0E40, 0x0E45}, - {0x0E46, 0x0E46}, {0x0E47, 0x0E4E}, {0x0E4F, 0x0E4F}, - {0x0E50, 0x0E59}, {0x0E5A, 0x0E5B}, {0x0E81, 0x0E82}, - {0x0E84, 0x0E84}, {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, - {0x0E8D, 0x0E8D}, {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, - {0x0EA1, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, - {0x0EAA, 0x0EAB}, {0x0EAD, 0x0EB0}, {0x0EB1, 0x0EB1}, - {0x0EB2, 0x0EB3}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, - {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, - {0x0F00, 0x0F00}, {0x0F01, 0x0F03}, {0x0F04, 0x0F12}, - {0x0F13, 0x0F13}, {0x0F14, 0x0F14}, {0x0F15, 0x0F17}, - {0x0F18, 0x0F19}, {0x0F1A, 0x0F1F}, {0x0F20, 0x0F29}, - {0x0F2A, 0x0F33}, {0x0F34, 0x0F34}, {0x0F35, 0x0F35}, - {0x0F36, 0x0F36}, {0x0F37, 0x0F37}, {0x0F38, 0x0F38}, - {0x0F39, 0x0F39}, {0x0F3A, 0x0F3A}, {0x0F3B, 0x0F3B}, - {0x0F3C, 0x0F3C}, {0x0F3D, 0x0F3D}, {0x0F3E, 0x0F3F}, - {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F7E}, - {0x0F7F, 0x0F7F}, {0x0F80, 
0x0F84}, {0x0F85, 0x0F85}, - {0x0F86, 0x0F87}, {0x0F88, 0x0F8C}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FBE, 0x0FC5}, {0x0FC6, 0x0FC6}, - {0x0FC7, 0x0FCC}, {0x0FCE, 0x0FCF}, {0x0FD0, 0x0FD4}, - {0x0FD5, 0x0FD8}, {0x0FD9, 0x0FDA}, {0x1000, 0x102A}, - {0x102B, 0x102C}, {0x102D, 0x1030}, {0x1031, 0x1031}, - {0x1032, 0x1037}, {0x1038, 0x1038}, {0x1039, 0x103A}, - {0x103B, 0x103C}, {0x103D, 0x103E}, {0x103F, 0x103F}, - {0x1040, 0x1049}, {0x104A, 0x104F}, {0x1050, 0x1055}, - {0x1056, 0x1057}, {0x1058, 0x1059}, {0x105A, 0x105D}, - {0x105E, 0x1060}, {0x1061, 0x1061}, {0x1062, 0x1064}, - {0x1065, 0x1066}, {0x1067, 0x106D}, {0x106E, 0x1070}, - {0x1071, 0x1074}, {0x1075, 0x1081}, {0x1082, 0x1082}, - {0x1083, 0x1084}, {0x1085, 0x1086}, {0x1087, 0x108C}, - {0x108D, 0x108D}, {0x108E, 0x108E}, {0x108F, 0x108F}, - {0x1090, 0x1099}, {0x109A, 0x109C}, {0x109D, 0x109D}, - {0x109E, 0x109F}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, - {0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FB, 0x10FB}, - {0x10FC, 0x10FC}, {0x10FD, 0x10FF}, {0x1160, 0x11FF}, - {0x1200, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, - {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, - {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, - {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, - {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, - {0x1318, 0x135A}, {0x135D, 0x135F}, {0x1360, 0x1368}, - {0x1369, 0x137C}, {0x1380, 0x138F}, {0x1390, 0x1399}, - {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x1400}, - {0x1401, 0x166C}, {0x166D, 0x166E}, {0x166F, 0x167F}, - {0x1680, 0x1680}, {0x1681, 0x169A}, {0x169B, 0x169B}, - {0x169C, 0x169C}, {0x16A0, 0x16EA}, {0x16EB, 0x16ED}, - {0x16EE, 0x16F0}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, - {0x170E, 0x1711}, {0x1712, 0x1714}, {0x1720, 0x1731}, - {0x1732, 0x1734}, {0x1735, 0x1736}, {0x1740, 0x1751}, - {0x1752, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, - {0x1772, 0x1773}, {0x1780, 0x17B3}, {0x17B4, 0x17B5}, - {0x17B6, 0x17B6}, {0x17B7, 0x17BD}, {0x17BE, 0x17C5}, - {0x17C6, 
0x17C6}, {0x17C7, 0x17C8}, {0x17C9, 0x17D3}, - {0x17D4, 0x17D6}, {0x17D7, 0x17D7}, {0x17D8, 0x17DA}, - {0x17DB, 0x17DB}, {0x17DC, 0x17DC}, {0x17DD, 0x17DD}, - {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x1805}, - {0x1806, 0x1806}, {0x1807, 0x180A}, {0x180B, 0x180D}, - {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1842}, - {0x1843, 0x1843}, {0x1844, 0x1877}, {0x1880, 0x1884}, - {0x1885, 0x1886}, {0x1887, 0x18A8}, {0x18A9, 0x18A9}, - {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, - {0x1920, 0x1922}, {0x1923, 0x1926}, {0x1927, 0x1928}, - {0x1929, 0x192B}, {0x1930, 0x1931}, {0x1932, 0x1932}, - {0x1933, 0x1938}, {0x1939, 0x193B}, {0x1940, 0x1940}, - {0x1944, 0x1945}, {0x1946, 0x194F}, {0x1950, 0x196D}, - {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, - {0x19D0, 0x19D9}, {0x19DA, 0x19DA}, {0x19DE, 0x19DF}, - {0x19E0, 0x19FF}, {0x1A00, 0x1A16}, {0x1A17, 0x1A18}, - {0x1A19, 0x1A1A}, {0x1A1B, 0x1A1B}, {0x1A1E, 0x1A1F}, - {0x1A20, 0x1A54}, {0x1A55, 0x1A55}, {0x1A56, 0x1A56}, - {0x1A57, 0x1A57}, {0x1A58, 0x1A5E}, {0x1A60, 0x1A60}, - {0x1A61, 0x1A61}, {0x1A62, 0x1A62}, {0x1A63, 0x1A64}, - {0x1A65, 0x1A6C}, {0x1A6D, 0x1A72}, {0x1A73, 0x1A7C}, - {0x1A7F, 0x1A7F}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, - {0x1AA0, 0x1AA6}, {0x1AA7, 0x1AA7}, {0x1AA8, 0x1AAD}, - {0x1AB0, 0x1ABD}, {0x1ABE, 0x1ABE}, {0x1B00, 0x1B03}, - {0x1B04, 0x1B04}, {0x1B05, 0x1B33}, {0x1B34, 0x1B34}, - {0x1B35, 0x1B35}, {0x1B36, 0x1B3A}, {0x1B3B, 0x1B3B}, - {0x1B3C, 0x1B3C}, {0x1B3D, 0x1B41}, {0x1B42, 0x1B42}, - {0x1B43, 0x1B44}, {0x1B45, 0x1B4B}, {0x1B50, 0x1B59}, - {0x1B5A, 0x1B60}, {0x1B61, 0x1B6A}, {0x1B6B, 0x1B73}, - {0x1B74, 0x1B7C}, {0x1B80, 0x1B81}, {0x1B82, 0x1B82}, - {0x1B83, 0x1BA0}, {0x1BA1, 0x1BA1}, {0x1BA2, 0x1BA5}, - {0x1BA6, 0x1BA7}, {0x1BA8, 0x1BA9}, {0x1BAA, 0x1BAA}, - {0x1BAB, 0x1BAD}, {0x1BAE, 0x1BAF}, {0x1BB0, 0x1BB9}, - {0x1BBA, 0x1BBF}, {0x1BC0, 0x1BE5}, {0x1BE6, 0x1BE6}, - {0x1BE7, 0x1BE7}, {0x1BE8, 0x1BE9}, {0x1BEA, 0x1BEC}, - {0x1BED, 0x1BED}, {0x1BEE, 0x1BEE}, {0x1BEF, 
0x1BF1}, - {0x1BF2, 0x1BF3}, {0x1BFC, 0x1BFF}, {0x1C00, 0x1C23}, - {0x1C24, 0x1C2B}, {0x1C2C, 0x1C33}, {0x1C34, 0x1C35}, - {0x1C36, 0x1C37}, {0x1C3B, 0x1C3F}, {0x1C40, 0x1C49}, - {0x1C4D, 0x1C4F}, {0x1C50, 0x1C59}, {0x1C5A, 0x1C77}, - {0x1C78, 0x1C7D}, {0x1C7E, 0x1C7F}, {0x1C80, 0x1C88}, - {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CD2}, {0x1CD3, 0x1CD3}, - {0x1CD4, 0x1CE0}, {0x1CE1, 0x1CE1}, {0x1CE2, 0x1CE8}, - {0x1CE9, 0x1CEC}, {0x1CED, 0x1CED}, {0x1CEE, 0x1CF1}, - {0x1CF2, 0x1CF3}, {0x1CF4, 0x1CF4}, {0x1CF5, 0x1CF6}, - {0x1CF8, 0x1CF9}, {0x1D00, 0x1D2B}, {0x1D2C, 0x1D6A}, - {0x1D6B, 0x1D77}, {0x1D78, 0x1D78}, {0x1D79, 0x1D7F}, - {0x1D80, 0x1D9A}, {0x1D9B, 0x1DBF}, {0x1DC0, 0x1DF5}, - {0x1DFB, 0x1DFF}, {0x1E00, 0x1EFF}, {0x1F00, 0x1F15}, - {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, - {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, - {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, - {0x1FB6, 0x1FBC}, {0x1FBD, 0x1FBD}, {0x1FBE, 0x1FBE}, - {0x1FBF, 0x1FC1}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC}, - {0x1FCD, 0x1FCF}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, - {0x1FDD, 0x1FDF}, {0x1FE0, 0x1FEC}, {0x1FED, 0x1FEF}, - {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x1FFD, 0x1FFE}, - {0x2000, 0x200A}, {0x200B, 0x200F}, {0x2011, 0x2012}, - {0x2017, 0x2017}, {0x201A, 0x201A}, {0x201B, 0x201B}, - {0x201E, 0x201E}, {0x201F, 0x201F}, {0x2023, 0x2023}, - {0x2028, 0x2028}, {0x2029, 0x2029}, {0x202A, 0x202E}, - {0x202F, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, - {0x2036, 0x2038}, {0x2039, 0x2039}, {0x203A, 0x203A}, - {0x203C, 0x203D}, {0x203F, 0x2040}, {0x2041, 0x2043}, - {0x2044, 0x2044}, {0x2045, 0x2045}, {0x2046, 0x2046}, - {0x2047, 0x2051}, {0x2052, 0x2052}, {0x2053, 0x2053}, - {0x2054, 0x2054}, {0x2055, 0x205E}, {0x205F, 0x205F}, - {0x2060, 0x2064}, {0x2066, 0x206F}, {0x2070, 0x2070}, - {0x2071, 0x2071}, {0x2075, 0x2079}, {0x207A, 0x207C}, - {0x207D, 0x207D}, {0x207E, 0x207E}, {0x2080, 0x2080}, - {0x2085, 0x2089}, {0x208A, 0x208C}, {0x208D, 0x208D}, - {0x208E, 0x208E}, {0x2090, 
0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20DC}, - {0x20DD, 0x20E0}, {0x20E1, 0x20E1}, {0x20E2, 0x20E4}, - {0x20E5, 0x20F0}, {0x2100, 0x2101}, {0x2102, 0x2102}, - {0x2104, 0x2104}, {0x2106, 0x2106}, {0x2107, 0x2107}, - {0x2108, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2114}, - {0x2115, 0x2115}, {0x2117, 0x2117}, {0x2118, 0x2118}, - {0x2119, 0x211D}, {0x211E, 0x2120}, {0x2123, 0x2123}, - {0x2124, 0x2124}, {0x2125, 0x2125}, {0x2127, 0x2127}, - {0x2128, 0x2128}, {0x2129, 0x2129}, {0x212A, 0x212A}, - {0x212C, 0x212D}, {0x212E, 0x212E}, {0x212F, 0x2134}, - {0x2135, 0x2138}, {0x2139, 0x2139}, {0x213A, 0x213B}, - {0x213C, 0x213F}, {0x2140, 0x2144}, {0x2145, 0x2149}, - {0x214A, 0x214A}, {0x214B, 0x214B}, {0x214C, 0x214D}, - {0x214E, 0x214E}, {0x214F, 0x214F}, {0x2150, 0x2152}, - {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, - {0x217A, 0x2182}, {0x2183, 0x2184}, {0x2185, 0x2188}, - {0x218A, 0x218B}, {0x219A, 0x219B}, {0x219C, 0x219F}, - {0x21A0, 0x21A0}, {0x21A1, 0x21A2}, {0x21A3, 0x21A3}, - {0x21A4, 0x21A5}, {0x21A6, 0x21A6}, {0x21A7, 0x21AD}, - {0x21AE, 0x21AE}, {0x21AF, 0x21B7}, {0x21BA, 0x21CD}, - {0x21CE, 0x21CF}, {0x21D0, 0x21D1}, {0x21D3, 0x21D3}, - {0x21D5, 0x21E6}, {0x21E8, 0x21F3}, {0x21F4, 0x21FF}, - {0x2201, 0x2201}, {0x2204, 0x2206}, {0x2209, 0x220A}, - {0x220C, 0x220E}, {0x2210, 0x2210}, {0x2212, 0x2214}, - {0x2216, 0x2219}, {0x221B, 0x221C}, {0x2221, 0x2222}, - {0x2224, 0x2224}, {0x2226, 0x2226}, {0x222D, 0x222D}, - {0x222F, 0x2233}, {0x2238, 0x223B}, {0x223E, 0x2247}, - {0x2249, 0x224B}, {0x224D, 0x2251}, {0x2253, 0x225F}, - {0x2262, 0x2263}, {0x2268, 0x2269}, {0x226C, 0x226D}, - {0x2270, 0x2281}, {0x2284, 0x2285}, {0x2288, 0x2294}, - {0x2296, 0x2298}, {0x229A, 0x22A4}, {0x22A6, 0x22BE}, - {0x22C0, 0x22FF}, {0x2300, 0x2307}, {0x2308, 0x2308}, - {0x2309, 0x2309}, {0x230A, 0x230A}, {0x230B, 0x230B}, - {0x230C, 0x2311}, {0x2313, 0x2319}, {0x231C, 0x231F}, - {0x2320, 0x2321}, {0x2322, 0x2328}, {0x232B, 0x237B}, - {0x237C, 
0x237C}, {0x237D, 0x239A}, {0x239B, 0x23B3}, - {0x23B4, 0x23DB}, {0x23DC, 0x23E1}, {0x23E2, 0x23E8}, - {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, - {0x2400, 0x2426}, {0x2440, 0x244A}, {0x24EA, 0x24EA}, - {0x254C, 0x254F}, {0x2574, 0x257F}, {0x2590, 0x2591}, - {0x2596, 0x259F}, {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, - {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, - {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, - {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, {0x25F0, 0x25F7}, - {0x25F8, 0x25FC}, {0x25FF, 0x25FF}, {0x2600, 0x2604}, - {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, - {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, - {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, - {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, - {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692}, - {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, - {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, - {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, - {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, - {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, - {0x2758, 0x2767}, {0x2768, 0x2768}, {0x2769, 0x2769}, - {0x276A, 0x276A}, {0x276B, 0x276B}, {0x276C, 0x276C}, - {0x276D, 0x276D}, {0x276E, 0x276E}, {0x276F, 0x276F}, - {0x2770, 0x2770}, {0x2771, 0x2771}, {0x2772, 0x2772}, - {0x2773, 0x2773}, {0x2774, 0x2774}, {0x2775, 0x2775}, - {0x2780, 0x2793}, {0x2794, 0x2794}, {0x2798, 0x27AF}, - {0x27B1, 0x27BE}, {0x27C0, 0x27C4}, {0x27C5, 0x27C5}, - {0x27C6, 0x27C6}, {0x27C7, 0x27E5}, {0x27EE, 0x27EE}, - {0x27EF, 0x27EF}, {0x27F0, 0x27FF}, {0x2800, 0x28FF}, - {0x2900, 0x297F}, {0x2980, 0x2982}, {0x2983, 0x2983}, - {0x2984, 0x2984}, {0x2987, 0x2987}, {0x2988, 0x2988}, - {0x2989, 0x2989}, {0x298A, 0x298A}, {0x298B, 0x298B}, - {0x298C, 0x298C}, {0x298D, 0x298D}, {0x298E, 0x298E}, - {0x298F, 0x298F}, {0x2990, 0x2990}, {0x2991, 0x2991}, - {0x2992, 0x2992}, {0x2993, 0x2993}, {0x2994, 0x2994}, - {0x2995, 0x2995}, {0x2996, 0x2996}, {0x2997, 
0x2997}, - {0x2998, 0x2998}, {0x2999, 0x29D7}, {0x29D8, 0x29D8}, - {0x29D9, 0x29D9}, {0x29DA, 0x29DA}, {0x29DB, 0x29DB}, - {0x29DC, 0x29FB}, {0x29FC, 0x29FC}, {0x29FD, 0x29FD}, - {0x29FE, 0x29FF}, {0x2A00, 0x2AFF}, {0x2B00, 0x2B1A}, - {0x2B1D, 0x2B2F}, {0x2B30, 0x2B44}, {0x2B45, 0x2B46}, - {0x2B47, 0x2B4C}, {0x2B4D, 0x2B4F}, {0x2B51, 0x2B54}, - {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, - {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, - {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2C7B}, - {0x2C7C, 0x2C7D}, {0x2C7E, 0x2C7F}, {0x2C80, 0x2CE4}, - {0x2CE5, 0x2CEA}, {0x2CEB, 0x2CEE}, {0x2CEF, 0x2CF1}, - {0x2CF2, 0x2CF3}, {0x2CF9, 0x2CFC}, {0x2CFD, 0x2CFD}, - {0x2CFE, 0x2CFF}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, - {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, - {0x2D70, 0x2D70}, {0x2D7F, 0x2D7F}, {0x2D80, 0x2D96}, - {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, - {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, - {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2DFF}, - {0x2E00, 0x2E01}, {0x2E02, 0x2E02}, {0x2E03, 0x2E03}, - {0x2E04, 0x2E04}, {0x2E05, 0x2E05}, {0x2E06, 0x2E08}, - {0x2E09, 0x2E09}, {0x2E0A, 0x2E0A}, {0x2E0B, 0x2E0B}, - {0x2E0C, 0x2E0C}, {0x2E0D, 0x2E0D}, {0x2E0E, 0x2E16}, - {0x2E17, 0x2E17}, {0x2E18, 0x2E19}, {0x2E1A, 0x2E1A}, - {0x2E1B, 0x2E1B}, {0x2E1C, 0x2E1C}, {0x2E1D, 0x2E1D}, - {0x2E1E, 0x2E1F}, {0x2E20, 0x2E20}, {0x2E21, 0x2E21}, - {0x2E22, 0x2E22}, {0x2E23, 0x2E23}, {0x2E24, 0x2E24}, - {0x2E25, 0x2E25}, {0x2E26, 0x2E26}, {0x2E27, 0x2E27}, - {0x2E28, 0x2E28}, {0x2E29, 0x2E29}, {0x2E2A, 0x2E2E}, - {0x2E2F, 0x2E2F}, {0x2E30, 0x2E39}, {0x2E3A, 0x2E3B}, - {0x2E3C, 0x2E3F}, {0x2E40, 0x2E40}, {0x2E41, 0x2E41}, - {0x2E42, 0x2E42}, {0x2E43, 0x2E44}, {0x303F, 0x303F}, - {0x4DC0, 0x4DFF}, {0xA4D0, 0xA4F7}, {0xA4F8, 0xA4FD}, - {0xA4FE, 0xA4FF}, {0xA500, 0xA60B}, {0xA60C, 0xA60C}, - {0xA60D, 0xA60F}, {0xA610, 0xA61F}, {0xA620, 0xA629}, - {0xA62A, 0xA62B}, {0xA640, 0xA66D}, {0xA66E, 0xA66E}, - {0xA66F, 0xA66F}, {0xA670, 
0xA672}, {0xA673, 0xA673}, - {0xA674, 0xA67D}, {0xA67E, 0xA67E}, {0xA67F, 0xA67F}, - {0xA680, 0xA69B}, {0xA69C, 0xA69D}, {0xA69E, 0xA69F}, - {0xA6A0, 0xA6E5}, {0xA6E6, 0xA6EF}, {0xA6F0, 0xA6F1}, - {0xA6F2, 0xA6F7}, {0xA700, 0xA716}, {0xA717, 0xA71F}, - {0xA720, 0xA721}, {0xA722, 0xA76F}, {0xA770, 0xA770}, - {0xA771, 0xA787}, {0xA788, 0xA788}, {0xA789, 0xA78A}, - {0xA78B, 0xA78E}, {0xA78F, 0xA78F}, {0xA790, 0xA7AE}, - {0xA7B0, 0xA7B7}, {0xA7F7, 0xA7F7}, {0xA7F8, 0xA7F9}, - {0xA7FA, 0xA7FA}, {0xA7FB, 0xA7FF}, {0xA800, 0xA801}, - {0xA802, 0xA802}, {0xA803, 0xA805}, {0xA806, 0xA806}, - {0xA807, 0xA80A}, {0xA80B, 0xA80B}, {0xA80C, 0xA822}, - {0xA823, 0xA824}, {0xA825, 0xA826}, {0xA827, 0xA827}, - {0xA828, 0xA82B}, {0xA830, 0xA835}, {0xA836, 0xA837}, - {0xA838, 0xA838}, {0xA839, 0xA839}, {0xA840, 0xA873}, - {0xA874, 0xA877}, {0xA880, 0xA881}, {0xA882, 0xA8B3}, - {0xA8B4, 0xA8C3}, {0xA8C4, 0xA8C5}, {0xA8CE, 0xA8CF}, - {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8F2, 0xA8F7}, - {0xA8F8, 0xA8FA}, {0xA8FB, 0xA8FB}, {0xA8FC, 0xA8FC}, - {0xA8FD, 0xA8FD}, {0xA900, 0xA909}, {0xA90A, 0xA925}, - {0xA926, 0xA92D}, {0xA92E, 0xA92F}, {0xA930, 0xA946}, - {0xA947, 0xA951}, {0xA952, 0xA953}, {0xA95F, 0xA95F}, - {0xA980, 0xA982}, {0xA983, 0xA983}, {0xA984, 0xA9B2}, - {0xA9B3, 0xA9B3}, {0xA9B4, 0xA9B5}, {0xA9B6, 0xA9B9}, - {0xA9BA, 0xA9BB}, {0xA9BC, 0xA9BC}, {0xA9BD, 0xA9C0}, - {0xA9C1, 0xA9CD}, {0xA9CF, 0xA9CF}, {0xA9D0, 0xA9D9}, - {0xA9DE, 0xA9DF}, {0xA9E0, 0xA9E4}, {0xA9E5, 0xA9E5}, - {0xA9E6, 0xA9E6}, {0xA9E7, 0xA9EF}, {0xA9F0, 0xA9F9}, - {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA29, 0xAA2E}, - {0xAA2F, 0xAA30}, {0xAA31, 0xAA32}, {0xAA33, 0xAA34}, - {0xAA35, 0xAA36}, {0xAA40, 0xAA42}, {0xAA43, 0xAA43}, - {0xAA44, 0xAA4B}, {0xAA4C, 0xAA4C}, {0xAA4D, 0xAA4D}, - {0xAA50, 0xAA59}, {0xAA5C, 0xAA5F}, {0xAA60, 0xAA6F}, - {0xAA70, 0xAA70}, {0xAA71, 0xAA76}, {0xAA77, 0xAA79}, - {0xAA7A, 0xAA7A}, {0xAA7B, 0xAA7B}, {0xAA7C, 0xAA7C}, - {0xAA7D, 0xAA7D}, {0xAA7E, 0xAA7F}, {0xAA80, 0xAAAF}, - {0xAAB0, 
0xAAB0}, {0xAAB1, 0xAAB1}, {0xAAB2, 0xAAB4}, - {0xAAB5, 0xAAB6}, {0xAAB7, 0xAAB8}, {0xAAB9, 0xAABD}, - {0xAABE, 0xAABF}, {0xAAC0, 0xAAC0}, {0xAAC1, 0xAAC1}, - {0xAAC2, 0xAAC2}, {0xAADB, 0xAADC}, {0xAADD, 0xAADD}, - {0xAADE, 0xAADF}, {0xAAE0, 0xAAEA}, {0xAAEB, 0xAAEB}, - {0xAAEC, 0xAAED}, {0xAAEE, 0xAAEF}, {0xAAF0, 0xAAF1}, - {0xAAF2, 0xAAF2}, {0xAAF3, 0xAAF4}, {0xAAF5, 0xAAF5}, - {0xAAF6, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, - {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, - {0xAB30, 0xAB5A}, {0xAB5B, 0xAB5B}, {0xAB5C, 0xAB5F}, - {0xAB60, 0xAB65}, {0xAB70, 0xABBF}, {0xABC0, 0xABE2}, - {0xABE3, 0xABE4}, {0xABE5, 0xABE5}, {0xABE6, 0xABE7}, - {0xABE8, 0xABE8}, {0xABE9, 0xABEA}, {0xABEB, 0xABEB}, - {0xABEC, 0xABEC}, {0xABED, 0xABED}, {0xABF0, 0xABF9}, - {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDB7F}, - {0xDB80, 0xDBFF}, {0xDC00, 0xDFFF}, {0xFB00, 0xFB06}, - {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1E, 0xFB1E}, - {0xFB1F, 0xFB28}, {0xFB29, 0xFB29}, {0xFB2A, 0xFB36}, - {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFB4F}, {0xFB50, 0xFBB1}, - {0xFBB2, 0xFBC1}, {0xFBD3, 0xFD3D}, {0xFD3E, 0xFD3E}, - {0xFD3F, 0xFD3F}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, - {0xFDF0, 0xFDFB}, {0xFDFC, 0xFDFC}, {0xFDFD, 0xFDFD}, - {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFC, 0xFFFC}, - {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, - {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, - {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, - {0x10137, 0x1013F}, {0x10140, 0x10174}, {0x10175, 0x10178}, - {0x10179, 0x10189}, {0x1018A, 0x1018B}, {0x1018C, 0x1018E}, - {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, - {0x101FD, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, - {0x102E0, 0x102E0}, {0x102E1, 0x102FB}, {0x10300, 0x1031F}, - {0x10320, 0x10323}, {0x10330, 0x10340}, {0x10341, 0x10341}, - {0x10342, 0x10349}, {0x1034A, 0x1034A}, {0x10350, 
0x10375}, - {0x10376, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x1039F}, - {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D0, 0x103D0}, - {0x103D1, 0x103D5}, {0x10400, 0x1044F}, {0x10450, 0x1047F}, - {0x10480, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, - {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, - {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, - {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, - {0x1083F, 0x1083F}, {0x10840, 0x10855}, {0x10857, 0x10857}, - {0x10858, 0x1085F}, {0x10860, 0x10876}, {0x10877, 0x10878}, - {0x10879, 0x1087F}, {0x10880, 0x1089E}, {0x108A7, 0x108AF}, - {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x108FF}, - {0x10900, 0x10915}, {0x10916, 0x1091B}, {0x1091F, 0x1091F}, - {0x10920, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x1099F}, - {0x109A0, 0x109B7}, {0x109BC, 0x109BD}, {0x109BE, 0x109BF}, - {0x109C0, 0x109CF}, {0x109D2, 0x109FF}, {0x10A00, 0x10A00}, - {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, - {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, - {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x10A40, 0x10A47}, - {0x10A50, 0x10A58}, {0x10A60, 0x10A7C}, {0x10A7D, 0x10A7E}, - {0x10A7F, 0x10A7F}, {0x10A80, 0x10A9C}, {0x10A9D, 0x10A9F}, - {0x10AC0, 0x10AC7}, {0x10AC8, 0x10AC8}, {0x10AC9, 0x10AE4}, - {0x10AE5, 0x10AE6}, {0x10AEB, 0x10AEF}, {0x10AF0, 0x10AF6}, - {0x10B00, 0x10B35}, {0x10B39, 0x10B3F}, {0x10B40, 0x10B55}, - {0x10B58, 0x10B5F}, {0x10B60, 0x10B72}, {0x10B78, 0x10B7F}, - {0x10B80, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, - {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, - {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x11000}, - {0x11001, 0x11001}, {0x11002, 0x11002}, {0x11003, 0x11037}, - {0x11038, 0x11046}, {0x11047, 0x1104D}, {0x11052, 0x11065}, - {0x11066, 0x1106F}, {0x1107F, 0x1107F}, {0x11080, 0x11081}, - {0x11082, 0x11082}, {0x11083, 0x110AF}, {0x110B0, 0x110B2}, - 
{0x110B3, 0x110B6}, {0x110B7, 0x110B8}, {0x110B9, 0x110BA}, - {0x110BB, 0x110BC}, {0x110BD, 0x110BD}, {0x110BE, 0x110C1}, - {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, {0x11100, 0x11102}, - {0x11103, 0x11126}, {0x11127, 0x1112B}, {0x1112C, 0x1112C}, - {0x1112D, 0x11134}, {0x11136, 0x1113F}, {0x11140, 0x11143}, - {0x11150, 0x11172}, {0x11173, 0x11173}, {0x11174, 0x11175}, - {0x11176, 0x11176}, {0x11180, 0x11181}, {0x11182, 0x11182}, - {0x11183, 0x111B2}, {0x111B3, 0x111B5}, {0x111B6, 0x111BE}, - {0x111BF, 0x111C0}, {0x111C1, 0x111C4}, {0x111C5, 0x111C9}, - {0x111CA, 0x111CC}, {0x111CD, 0x111CD}, {0x111D0, 0x111D9}, - {0x111DA, 0x111DA}, {0x111DB, 0x111DB}, {0x111DC, 0x111DC}, - {0x111DD, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, - {0x11213, 0x1122B}, {0x1122C, 0x1122E}, {0x1122F, 0x11231}, - {0x11232, 0x11233}, {0x11234, 0x11234}, {0x11235, 0x11235}, - {0x11236, 0x11237}, {0x11238, 0x1123D}, {0x1123E, 0x1123E}, - {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, - {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112A9, 0x112A9}, - {0x112B0, 0x112DE}, {0x112DF, 0x112DF}, {0x112E0, 0x112E2}, - {0x112E3, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11301}, - {0x11302, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, - {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, - {0x11335, 0x11339}, {0x1133C, 0x1133C}, {0x1133D, 0x1133D}, - {0x1133E, 0x1133F}, {0x11340, 0x11340}, {0x11341, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, - {0x11357, 0x11357}, {0x1135D, 0x11361}, {0x11362, 0x11363}, - {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x11434}, - {0x11435, 0x11437}, {0x11438, 0x1143F}, {0x11440, 0x11441}, - {0x11442, 0x11444}, {0x11445, 0x11445}, {0x11446, 0x11446}, - {0x11447, 0x1144A}, {0x1144B, 0x1144F}, {0x11450, 0x11459}, - {0x1145B, 0x1145B}, {0x1145D, 0x1145D}, {0x11480, 0x114AF}, - {0x114B0, 0x114B2}, {0x114B3, 0x114B8}, {0x114B9, 0x114B9}, - {0x114BA, 0x114BA}, {0x114BB, 0x114BE}, {0x114BF, 0x114C0}, - {0x114C1, 
0x114C1}, {0x114C2, 0x114C3}, {0x114C4, 0x114C5}, - {0x114C6, 0x114C6}, {0x114C7, 0x114C7}, {0x114D0, 0x114D9}, - {0x11580, 0x115AE}, {0x115AF, 0x115B1}, {0x115B2, 0x115B5}, - {0x115B8, 0x115BB}, {0x115BC, 0x115BD}, {0x115BE, 0x115BE}, - {0x115BF, 0x115C0}, {0x115C1, 0x115D7}, {0x115D8, 0x115DB}, - {0x115DC, 0x115DD}, {0x11600, 0x1162F}, {0x11630, 0x11632}, - {0x11633, 0x1163A}, {0x1163B, 0x1163C}, {0x1163D, 0x1163D}, - {0x1163E, 0x1163E}, {0x1163F, 0x11640}, {0x11641, 0x11643}, - {0x11644, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, - {0x11680, 0x116AA}, {0x116AB, 0x116AB}, {0x116AC, 0x116AC}, - {0x116AD, 0x116AD}, {0x116AE, 0x116AF}, {0x116B0, 0x116B5}, - {0x116B6, 0x116B6}, {0x116B7, 0x116B7}, {0x116C0, 0x116C9}, - {0x11700, 0x11719}, {0x1171D, 0x1171F}, {0x11720, 0x11721}, - {0x11722, 0x11725}, {0x11726, 0x11726}, {0x11727, 0x1172B}, - {0x11730, 0x11739}, {0x1173A, 0x1173B}, {0x1173C, 0x1173E}, - {0x1173F, 0x1173F}, {0x118A0, 0x118DF}, {0x118E0, 0x118E9}, - {0x118EA, 0x118F2}, {0x118FF, 0x118FF}, {0x11AC0, 0x11AF8}, - {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C2F, 0x11C2F}, - {0x11C30, 0x11C36}, {0x11C38, 0x11C3D}, {0x11C3E, 0x11C3E}, - {0x11C3F, 0x11C3F}, {0x11C40, 0x11C40}, {0x11C41, 0x11C45}, - {0x11C50, 0x11C59}, {0x11C5A, 0x11C6C}, {0x11C70, 0x11C71}, - {0x11C72, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CA9}, - {0x11CAA, 0x11CB0}, {0x11CB1, 0x11CB1}, {0x11CB2, 0x11CB3}, - {0x11CB4, 0x11CB4}, {0x11CB5, 0x11CB6}, {0x12000, 0x12399}, - {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, - {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, - {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, - {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF4}, {0x16AF5, 0x16AF5}, - {0x16B00, 0x16B2F}, {0x16B30, 0x16B36}, {0x16B37, 0x16B3B}, - {0x16B3C, 0x16B3F}, {0x16B40, 0x16B43}, {0x16B44, 0x16B44}, - {0x16B45, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, - {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, - {0x16F50, 0x16F50}, 
{0x16F51, 0x16F7E}, {0x16F8F, 0x16F92}, - {0x16F93, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, - {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BC9C}, - {0x1BC9D, 0x1BC9E}, {0x1BC9F, 0x1BC9F}, {0x1BCA0, 0x1BCA3}, - {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, - {0x1D165, 0x1D166}, {0x1D167, 0x1D169}, {0x1D16A, 0x1D16C}, - {0x1D16D, 0x1D172}, {0x1D173, 0x1D17A}, {0x1D17B, 0x1D182}, - {0x1D183, 0x1D184}, {0x1D185, 0x1D18B}, {0x1D18C, 0x1D1A9}, - {0x1D1AA, 0x1D1AD}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, - {0x1D242, 0x1D244}, {0x1D245, 0x1D245}, {0x1D300, 0x1D356}, - {0x1D360, 0x1D371}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, - {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, - {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, - {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, - {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, - {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, - {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, - {0x1D6C1, 0x1D6C1}, {0x1D6C2, 0x1D6DA}, {0x1D6DB, 0x1D6DB}, - {0x1D6DC, 0x1D6FA}, {0x1D6FB, 0x1D6FB}, {0x1D6FC, 0x1D714}, - {0x1D715, 0x1D715}, {0x1D716, 0x1D734}, {0x1D735, 0x1D735}, - {0x1D736, 0x1D74E}, {0x1D74F, 0x1D74F}, {0x1D750, 0x1D76E}, - {0x1D76F, 0x1D76F}, {0x1D770, 0x1D788}, {0x1D789, 0x1D789}, - {0x1D78A, 0x1D7A8}, {0x1D7A9, 0x1D7A9}, {0x1D7AA, 0x1D7C2}, - {0x1D7C3, 0x1D7C3}, {0x1D7C4, 0x1D7CB}, {0x1D7CE, 0x1D7FF}, - {0x1D800, 0x1D9FF}, {0x1DA00, 0x1DA36}, {0x1DA37, 0x1DA3A}, - {0x1DA3B, 0x1DA6C}, {0x1DA6D, 0x1DA74}, {0x1DA75, 0x1DA75}, - {0x1DA76, 0x1DA83}, {0x1DA84, 0x1DA84}, {0x1DA85, 0x1DA86}, - {0x1DA87, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, - {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, - {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, - {0x1E8C7, 0x1E8CF}, {0x1E8D0, 0x1E8D6}, {0x1E900, 0x1E943}, - {0x1E944, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, - {0x1EE00, 0x1EE03}, {0x1EE05, 
0x1EE1F}, {0x1EE21, 0x1EE22}, - {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, - {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, - {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, - {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, - {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, - {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, - {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, - {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, - {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, - {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, - {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, - {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, - {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, - {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, - {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, - {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, - {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, - {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, - {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, - {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, - {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA}, - {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4}, - {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, - {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001}, - {0xE0020, 0xE007F}, -} - -// Condition have flag EastAsianWidth whether the current locale is CJK or not. -type Condition struct { - EastAsianWidth bool -} - -// NewCondition return new instance of Condition which is current locale. -func NewCondition() *Condition { - return &Condition{EastAsianWidth} -} - -// RuneWidth returns the number of cells in r. 
-// See http://www.unicode.org/reports/tr11/ -func (c *Condition) RuneWidth(r rune) int { - switch { - case r < 0 || r > 0x10FFFF || - inTables(r, nonprint, combining, notassigned): - return 0 - case (c.EastAsianWidth && IsAmbiguousWidth(r)) || - inTables(r, doublewidth, emoji): - return 2 - default: - return 1 - } -} - -// StringWidth return width as you can see -func (c *Condition) StringWidth(s string) (width int) { - for _, r := range []rune(s) { - width += c.RuneWidth(r) - } - return width -} - -// Truncate return string truncated with w cells -func (c *Condition) Truncate(s string, w int, tail string) string { - if c.StringWidth(s) <= w { - return s - } - r := []rune(s) - tw := c.StringWidth(tail) - w -= tw - width := 0 - i := 0 - for ; i < len(r); i++ { - cw := c.RuneWidth(r[i]) - if width+cw > w { - break - } - width += cw - } - return string(r[0:i]) + tail -} - -// Wrap return string wrapped with w cells -func (c *Condition) Wrap(s string, w int) string { - width := 0 - out := "" - for _, r := range []rune(s) { - cw := RuneWidth(r) - if r == '\n' { - out += string(r) - width = 0 - continue - } else if width+cw > w { - out += "\n" - width = 0 - out += string(r) - width += cw - continue - } - out += string(r) - width += cw - } - return out -} - -// FillLeft return string filled in left by spaces in w cells -func (c *Condition) FillLeft(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return string(b) + s - } - return s -} - -// FillRight return string filled in left by spaces in w cells -func (c *Condition) FillRight(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return s + string(b) - } - return s -} - -// RuneWidth returns the number of cells in r. 
-// See http://www.unicode.org/reports/tr11/ -func RuneWidth(r rune) int { - return DefaultCondition.RuneWidth(r) -} - -// IsAmbiguousWidth returns whether is ambiguous width or not. -func IsAmbiguousWidth(r rune) bool { - return inTables(r, private, ambiguous) -} - -// IsNeutralWidth returns whether is neutral width or not. -func IsNeutralWidth(r rune) bool { - return inTable(r, neutral) -} - -// StringWidth return width as you can see -func StringWidth(s string) (width int) { - return DefaultCondition.StringWidth(s) -} - -// Truncate return string truncated with w cells -func Truncate(s string, w int, tail string) string { - return DefaultCondition.Truncate(s, w, tail) -} - -// Wrap return string wrapped with w cells -func Wrap(s string, w int) string { - return DefaultCondition.Wrap(s, w) -} - -// FillLeft return string filled in left by spaces in w cells -func FillLeft(s string, w int) string { - return DefaultCondition.FillLeft(s, w) -} - -// FillRight return string filled in left by spaces in w cells -func FillRight(s string, w int) string { - return DefaultCondition.FillRight(s, w) -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go deleted file mode 100644 index 0ce32c5e7..000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build js - -package runewidth - -func IsEastAsian() bool { - // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. 
- return false -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go deleted file mode 100644 index c579e9a31..000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go +++ /dev/null @@ -1,77 +0,0 @@ -// +build !windows,!js - -package runewidth - -import ( - "os" - "regexp" - "strings" -) - -var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) - -var mblenTable = map[string]int{ - "utf-8": 6, - "utf8": 6, - "jis": 8, - "eucjp": 3, - "euckr": 2, - "euccn": 2, - "sjis": 2, - "cp932": 2, - "cp51932": 2, - "cp936": 2, - "cp949": 2, - "cp950": 2, - "big5": 2, - "gbk": 2, - "gb2312": 2, -} - -func isEastAsian(locale string) bool { - charset := strings.ToLower(locale) - r := reLoc.FindStringSubmatch(locale) - if len(r) == 2 { - charset = strings.ToLower(r[1]) - } - - if strings.HasSuffix(charset, "@cjk_narrow") { - return false - } - - for pos, b := range []byte(charset) { - if b == '@' { - charset = charset[:pos] - break - } - } - max := 1 - if m, ok := mblenTable[charset]; ok { - max = m - } - if max > 1 && (charset[0] != 'u' || - strings.HasPrefix(locale, "ja") || - strings.HasPrefix(locale, "ko") || - strings.HasPrefix(locale, "zh")) { - return true - } - return false -} - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - locale := os.Getenv("LC_CTYPE") - if locale == "" { - locale = os.Getenv("LANG") - } - - // ignore C locale - if locale == "POSIX" || locale == "C" { - return false - } - if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { - return false - } - - return isEastAsian(locale) -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go deleted file mode 100644 index 0258876b9..000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package runewidth - -import ( - "syscall" -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32") - procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") -) - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - r1, _, _ := procGetConsoleOutputCP.Call() - if r1 == 0 { - return false - } - - switch int(r1) { - case 932, 51932, 936, 949, 950: - return true - } - - return false -} diff --git a/vendor/vendor.json b/vendor/vendor.json index e536cbb7d..d4d157eb9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -565,12 +565,6 @@ "path": "github.com/biogo/hts/bgzf", "revision": "50da7d4131a3b5c9d063932461cab4d1fafb20b0" }, - { - "checksumSHA1": "ymc5+iJ+1ipls3ihqPdzMjFYCqo=", - "path": "github.com/cheggaaa/pb", - "revision": "18d384da9bdc1e5a08fc2a62a494c321d9ae74ea", - "revisionTime": "2017-12-14T13:20:59Z" - }, { "checksumSHA1": "Lf3uUXTkKK5DJ37BxQvxO1Fq+K8=", "comment": "v1.0.0-3-g6d21280", @@ -822,12 +816,6 @@ "path": "github.com/hashicorp/yamux", "revision": "df949784da9ed028ee76df44652e42d37a09d7e4" }, - { - "checksumSHA1": "28nznojcl6Ejtm6I1tKozgy0isg=", - "path": "github.com/jlaffaye/ftp", - "revision": "025815df64030c144b86e368fedf80ee195fe464", - "revisionTime": "2016-02-29T21:52:38Z" - }, { "checksumSHA1": "3/Bhy+ua/DCv2ElMD5GzOYSGN6g=", "comment": "0.2.2-2-gc01cf91", @@ -929,12 +917,6 @@ "path": "github.com/mattn/go-isatty", "revision": "56b76bdf51f7708750eac80fa38b952bb9f32639" }, - { - "checksumSHA1": "MNkKJyk2TazKMJYbal5wFHybpyA=", - "path": "github.com/mattn/go-runewidth", - "revision": "14207d285c6c197daabb5c9793d63e7af9ab2d50", - "revisionTime": 
"2017-02-01T02:35:40Z" - }, { "checksumSHA1": "UP+pXl+ic9y6qrpZA5MqDIAuGfw=", "path": "github.com/mitchellh/cli", @@ -1498,16 +1480,6 @@ "checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=", "path": "google.golang.org/cloud/internal", "revision": "5a3b06f8b5da3b7c3a93da43163b872c86c509ef" - }, - { - "checksumSHA1": "gY2M/3SCxqgrPz+P/N6JQ+m/wnA=", - "path": "gopkg.in/xmlpath.v2", - "revision": "860cbeca3ebcc600db0b213c0e83ad6ce91f5739" - }, - { - "checksumSHA1": "0dDWcU08d0FS4d99NCgCkGLjjqw=", - "path": "gopkg.in/xmlpath.v2/cmd/webpath", - "revision": "860cbeca3ebcc600db0b213c0e83ad6ce91f5739" } ], "rootPath": "github.com/hashicorp/packer" From 4a1fb0d26242906229a6b3ab876a1b66feee37f8 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sat, 6 Jan 2018 19:46:52 -0600 Subject: [PATCH 054/251] Grrr...gofmt -w common/*.go --- common/config_test.go | 4 ++-- common/download.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/config_test.go b/common/config_test.go index f9b1748ce..bb2889f33 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -197,8 +197,8 @@ func TestDownloadableURL_FilePaths(t *testing.T) { } expected := fmt.Sprintf("%s%s", - filePrefix, - strings.Replace(tfPath, `\`, `/`, -1)) + filePrefix, + strings.Replace(tfPath, `\`, `/`, -1)) if u != expected { t.Fatalf("unexpected: %#v != %#v", u, expected) } diff --git a/common/download.go b/common/download.go index 27e9b32a8..fb7ce5584 100644 --- a/common/download.go +++ b/common/download.go @@ -208,7 +208,7 @@ func (d *DownloadClient) Get() (string, error) { } func (d *DownloadClient) PercentProgress() int { - if (d.downloader == nil) { + if d.downloader == nil { return -1 } From 46a5ca30e5823e5b0c9cbdfafc9ca85da06b14d5 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sat, 6 Jan 2018 19:51:11 -0600 Subject: [PATCH 055/251] Removed call to filepath.Rel(...) 
in builder/vmware/iso/step_create_vmx.go --- builder/vmware/iso/step_create_vmx.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 292a6a6e7..1f4ee812f 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -43,11 +43,6 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) - // Convert the iso_path into a path relative to the .vmx file if possible - if relativeIsoPath, err := filepath.Abs(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { - isoPath = relativeIsoPath - } - ui.Say("Building and writing VMX file") vmxTemplate := DefaultVMXTemplate From 3cf448f6ec8b5339619ba6d0435e80d1eee7a8ac Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sat, 6 Jan 2018 20:02:37 -0600 Subject: [PATCH 056/251] Reverted previously removed additions of tests that check for ftp:// or nonexistent-protocol:// using DownloadableURL. DownloadableURL's responsibility is not to have inherent knowledge of protocols that are available, but to format an invalid url/path to a valid url/path. 
--- builder/virtualbox/ovf/config_test.go | 11 ----------- common/config_test.go | 6 ------ 2 files changed, 17 deletions(-) diff --git a/builder/virtualbox/ovf/config_test.go b/builder/virtualbox/ovf/config_test.go index baba657a8..e9c536e54 100644 --- a/builder/virtualbox/ovf/config_test.go +++ b/builder/virtualbox/ovf/config_test.go @@ -76,17 +76,6 @@ func TestNewConfig_sourcePath(t *testing.T) { t.Fatalf("Nonexistant file should throw a validation error!") } - // Bad - c = testConfig(t) - c["source_path"] = "nonexistent-protocol://i/dont/exist" - _, warns, err = NewConfig(c) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatalf("should error") - } - // Good tf := getTempFile(t) defer os.Remove(tf.Name()) diff --git a/common/config_test.go b/common/config_test.go index bb2889f33..da93a19ef 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -43,12 +43,6 @@ func TestDownloadableURL(t *testing.T) { t.Fatalf("expected err : %s", err) } - // Invalid: unsupported scheme - _, err = DownloadableURL("nonexistent-protocol://host.com/path") - if err == nil { - t.Fatalf("expected err : %s", err) - } - // Valid: http u, err := DownloadableURL("HTTP://packer.io/path") if err != nil { From c17f827e1d5d82c12e036552148dd70883c2aa03 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 9 Jan 2018 20:08:06 -0600 Subject: [PATCH 057/251] Split up DownloadableURL() into it's individual components: SupportedURL(), DownloadableURL(), and ValidatedURL(). Updated all instances of DownloadableURL() to point to ValidatedURL(). Reverted the tests that are based on un-supported protocols. 
--- .../common/step_download_guest_additions.go | 2 +- builder/virtualbox/ovf/config.go | 2 +- builder/virtualbox/ovf/config_test.go | 11 ++ common/config.go | 111 ++++++++++++------ common/config_test.go | 18 ++- common/download.go | 2 - common/iso_config.go | 2 +- 7 files changed, 102 insertions(+), 46 deletions(-) diff --git a/builder/virtualbox/common/step_download_guest_additions.go b/builder/virtualbox/common/step_download_guest_additions.go index 93f3952c2..d0522e093 100644 --- a/builder/virtualbox/common/step_download_guest_additions.go +++ b/builder/virtualbox/common/step_download_guest_additions.go @@ -121,7 +121,7 @@ func (s *StepDownloadGuestAdditions) Run(state multistep.StateBag) multistep.Ste } // Convert the file/url to an actual URL for step_download to process. - url, err = common.DownloadableURL(url) + url, err = common.ValidatedURL(url) if err != nil { err := fmt.Errorf("Error preparing guest additions url: %s", err) state.Put("error", err) diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go index 41a013ebd..5eef4507c 100644 --- a/builder/virtualbox/ovf/config.go +++ b/builder/virtualbox/ovf/config.go @@ -97,7 +97,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { if c.SourcePath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) } else { - c.SourcePath, err = common.DownloadableURL(c.SourcePath) + c.SourcePath, err = common.ValidatedURL(c.SourcePath) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is invalid: %s", err)) } diff --git a/builder/virtualbox/ovf/config_test.go b/builder/virtualbox/ovf/config_test.go index e9c536e54..51412a0c5 100644 --- a/builder/virtualbox/ovf/config_test.go +++ b/builder/virtualbox/ovf/config_test.go @@ -76,6 +76,17 @@ func TestNewConfig_sourcePath(t *testing.T) { t.Fatalf("Nonexistant file should throw a validation error!") } + // Bad + c = testConfig(t) + c["source_path"] = "ftp://i/dont/exist" + _, 
warns, err = NewConfig(c) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatalf("should error") + } + // Good tf := getTempFile(t) defer os.Remove(tf.Name()) diff --git a/common/config.go b/common/config.go index c8c99a31e..5631e8e5e 100644 --- a/common/config.go +++ b/common/config.go @@ -5,6 +5,7 @@ import ( "net/url" "os" "path/filepath" + "regexp" "strings" "time" ) @@ -42,58 +43,98 @@ func ChooseString(vals ...string) string { return "" } +// SupportedURL verifies that the url passed is actually supported or not +// This will also validate that the protocol is one that's actually implemented. +func SupportedURL(u *url.URL) bool { + // url.Parse shouldn't return nil except on error....but it can. + if u == nil { + return false + } + + // build a dummy NewDownloadClient since this is the only place that valid + // protocols are actually exposed. + cli := NewDownloadClient(&DownloadConfig{}) + + // Iterate through each downloader to see if a protocol was found. + ok := false + for scheme, _ := range cli.config.DownloaderMap { + if strings.ToLower(u.Scheme) == strings.ToLower(scheme) { + ok = true + } + } + return ok +} + // DownloadableURL processes a URL that may also be a file path and returns -// a completely valid URL. For example, the original URL might be "local/file.iso" -// which isn't a valid URL. DownloadableURL will return "file:///local/file.iso" +// a completely valid URL representing the requested file. For example, +// the original URL might be "local/file.iso" which isn't a valid URL, +// and so DownloadableURL will return "file://local/file.iso" +// No other transformations are done to the path. func DownloadableURL(original string) (string, error) { + var result string - // Verify that the scheme is something we support in our common downloader. 
- supported := []string{"file", "http", "https", "smb"} - found := false - for _, s := range supported { - if strings.HasPrefix(strings.ToLower(original), s+"://") { - found = true - break - } + // Fix the url if it's using bad characters commonly mistaken with a path. + original = filepath.ToSlash(original) + + // Check to see that this is a parseable URL with a scheme. If so, then just pass it through. + if u, err := url.Parse(original); err == nil && u.Scheme != "" && u.Host != "" { + return filepath.ToSlash(original), nil } - // If it's properly prefixed with something we support, then we don't need - // to make it a uri. - if found { - original = filepath.ToSlash(original) - - // make sure that it can be parsed though.. - uri, err := url.Parse(original) + // Since it's not a url, this might be a path. So, check that the file exists, + // then make it an absolute path so we can make a proper uri. + if _, err := os.Stat(original); err == nil { + result, err = filepath.Abs(filepath.FromSlash(original)) if err != nil { return "", err } - uri.Scheme = strings.ToLower(uri.Scheme) - - return uri.String(), nil - } - - // If the file exists, then make it an absolute path - _, err := os.Stat(original) - if err == nil { - original, err = filepath.Abs(filepath.FromSlash(original)) + result, err = filepath.EvalSymlinks(result) if err != nil { return "", err } - original, err = filepath.EvalSymlinks(original) - if err != nil { - return "", err - } + result = filepath.Clean(result) + result = filepath.ToSlash(result) - original = filepath.Clean(original) - original = filepath.ToSlash(original) + // We have no idea what this might be, so we'll leave it as is. + } else { + result = filepath.ToSlash(original) } - // Since it wasn't properly prefixed, let's make it into a well-formed - // file:// uri. + // We should have a path that can just turn into a file:// scheme'd url. 
+ return fmt.Sprintf("file://%s", result), nil +} - return "file://" + original, nil +// Force the parameter into a url. This will transform the parameter into +// a proper url, removing slashes, adding the proper prefix, etc. +func ValidatedURL(original string) (string, error) { + + // See if the user failed to give a url + if ok, _ := regexp.MatchString("(?m)^[^[:punct:]]+://", original); !ok { + + // So since no magic was found, this must be a path. + result, err := DownloadableURL(original) + if err == nil { + return ValidatedURL(result) + } + + return "", err + } + + // Verify that the url is parseable...just in case. + u, err := url.Parse(original) + if err != nil { + return "", err + } + + // We should now have a url, so verify that it's a protocol we support. + if !SupportedURL(u) { + return "", fmt.Errorf("Unsupported protocol scheme! (%#v)", u) + } + + // We should now have a properly formatted and supported url + return u.String(), nil } // FileExistsLocally takes the URL output from DownloadableURL, and determines diff --git a/common/config_test.go b/common/config_test.go index da93a19ef..2eaa5a998 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -36,15 +36,21 @@ func TestChooseString(t *testing.T) { } } -func TestDownloadableURL(t *testing.T) { +func TestValidatedURL(t *testing.T) { // Invalid URL: has hex code in host - _, err := DownloadableURL("http://what%20.com") + _, err := ValidatedURL("http://what%20.com") + if err == nil { + t.Fatalf("expected err : %s", err) + } + + // Invalid: unsupported scheme + _, err = ValidatedURL("ftp://host.com/path") if err == nil { t.Fatalf("expected err : %s", err) } // Valid: http - u, err := DownloadableURL("HTTP://packer.io/path") + u, err := ValidatedURL("HTTP://packer.io/path") if err != nil { t.Fatalf("err: %s", err) } @@ -69,7 +75,7 @@ func TestDownloadableURL(t *testing.T) { } for _, tc := range cases { - u, err := DownloadableURL(tc.InputString) + u, err := ValidatedURL(tc.InputString) if 
u != tc.OutputURL { t.Fatal(fmt.Sprintf("Error with URL %s: got %s but expected %s", tc.InputString, tc.OutputURL, u)) @@ -77,11 +83,11 @@ func TestDownloadableURL(t *testing.T) { if (err != nil) != tc.ErrExpected { if tc.ErrExpected == true { t.Fatal(fmt.Sprintf("Error with URL %s: we expected "+ - "DownloadableURL to return an error but didn't get one.", + "ValidatedURL to return an error but didn't get one.", tc.InputString)) } else { t.Fatal(fmt.Sprintf("Error with URL %s: we did not expect an "+ - " error from DownloadableURL but we got: %s", + " error from ValidatedURL but we got: %s", tc.InputString, err)) } } diff --git a/common/download.go b/common/download.go index fb7ce5584..6d3d2a685 100644 --- a/common/download.go +++ b/common/download.go @@ -86,8 +86,6 @@ func NewDownloadClient(c *DownloadConfig) *DownloadClient { // Create downloader map if it hasn't been specified already. if c.DownloaderMap == nil { - // XXX: Make sure you add any new protocols you implement - // to the DownloadableURL implementation in config.go! c.DownloaderMap = map[string]Downloader{ "file": &FileDownloader{bufferSize: nil}, "http": &HTTPDownloader{userAgent: c.UserAgent}, diff --git a/common/iso_config.go b/common/iso_config.go index 5216dc458..e5a1e4e90 100644 --- a/common/iso_config.go +++ b/common/iso_config.go @@ -111,7 +111,7 @@ func (c *ISOConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs [ c.ISOChecksum = strings.ToLower(c.ISOChecksum) for i, url := range c.ISOUrls { - url, err := DownloadableURL(url) + url, err := ValidatedURL(url) if err != nil { errs = append( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) From 50e9cd2ca75c9387113cea136ca06b29fae098cf Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 16 Jan 2018 13:46:27 -0600 Subject: [PATCH 058/251] Initial fixes of common/config.go after rebase before refactoring of test-cases so that they don't require root to run. 
--- common/config.go | 1 + common/config_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/common/config.go b/common/config.go index 5631e8e5e..c79710221 100644 --- a/common/config.go +++ b/common/config.go @@ -8,6 +8,7 @@ import ( "regexp" "strings" "time" + "runtime" ) // PackerKeyEnv is used to specify the key interval (delay) between keystrokes diff --git a/common/config_test.go b/common/config_test.go index 2eaa5a998..af26aeefa 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" "testing" + "runtime" ) func TestChooseString(t *testing.T) { From 95f60f61531916832e900d4906b675d2dd3f27f5 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 22:43:08 -0600 Subject: [PATCH 059/251] Modified common/config.go to accommodate some of the new DownloadableURL policies made by the PR #5761 merge. common/config.go: Added the ability for DownloadableURL to promote UNC paths to the SMB uri. Modified DownloadableURL to include the "./" prefix when a relative path is passed to it. Fix-up the DownloadableURL argument if on windows and incorrectly prefixed with "/". --- common/config.go | 50 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/common/config.go b/common/config.go index c79710221..a3723b40a 100644 --- a/common/config.go +++ b/common/config.go @@ -74,18 +74,40 @@ func SupportedURL(u *url.URL) bool { func DownloadableURL(original string) (string, error) { var result string + // Check that the user specified a UNC path, and promote it to an smb:// uri. + if strings.HasPrefix(original, "\\\\") && len(original) > 2 && original[2] != '?' { + result = filepath.ToSlash(original[2:]) + return fmt.Sprintf("smb://%s", result), nil + } + // Fix the url if it's using bad characters commonly mistaken with a path. original = filepath.ToSlash(original) - // Check to see that this is a parseable URL with a scheme. 
If so, then just pass it through. + // Check to see that this is a parseable URL with a scheme and a host. + // If so, then just pass it through. if u, err := url.Parse(original); err == nil && u.Scheme != "" && u.Host != "" { - return filepath.ToSlash(original), nil + return original, nil } - // Since it's not a url, this might be a path. So, check that the file exists, - // then make it an absolute path so we can make a proper uri. - if _, err := os.Stat(original); err == nil { - result, err = filepath.Abs(filepath.FromSlash(original)) + // If it's a file scheme, then convert it back to a regular path so the next + // case which forces it to an absolute path, will correct it. + if u, err := url.Parse(original); err == nil && strings.ToLower(u.Scheme) == "file" { + original = u.Path + } + + // If we're on Windows and we start with a slash, then this absolute path + // is wrong. Fix it up, so the next case can figure out the absolute path. + if rpath := strings.SplitN(original, "/", 2); rpath[0] == "" && runtime.GOOS == "windows" { + result = rpath[1] + } else { + result = original + } + + // Since we should be some kind of path (relative or absolute), check + // that the file exists, then make it an absolute path so we can return an + // absolute uri. + if _, err := os.Stat(result); err == nil { + result, err = filepath.Abs(filepath.FromSlash(result)) if err != nil { return "", err } @@ -96,15 +118,17 @@ func DownloadableURL(original string) (string, error) { } result = filepath.Clean(result) - result = filepath.ToSlash(result) - - // We have no idea what this might be, so we'll leave it as is. - } else { - result = filepath.ToSlash(original) + return fmt.Sprintf("file:///%s", filepath.ToSlash(result)), nil } - // We should have a path that can just turn into a file:// scheme'd url. - return fmt.Sprintf("file://%s", result), nil + // Otherwise, check if it was originally an absolute path, and fix it if so. 
+ if strings.HasPrefix(original, "/") { + return fmt.Sprintf("file:///%s", result), nil + } + + // Anything left should be a non-existent relative path. So fix it up here. + result = filepath.ToSlash(filepath.Clean(result)) + return fmt.Sprintf("file://./%s", result), nil } // Force the parameter into a url. This will transform the parameter into From 15079a99dc42560122e7afa592140f99ad8370e7 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:08:22 -0600 Subject: [PATCH 060/251] Fixed common/config_test.go tests for DownloadableURL to avoid writing to disk on the windows platform. Also added tests for relative paths/uris. common/config_test.go: Replaced instances of os.Mkdir and os.Create with tests that use the existing "common/test-fixtures" mechanism. Removed the runtime.GOOS test for the "FileExistsLocally" test, as the functionality should work regardless of the platform. Added some more comprehensive tests for the relative uri/pathing. Replaced the Windows Object Manager name test as the Object Manager's naming scheme is different from a UNC path. Modified the FilePaths tests to support the policy of windows absolute paths being prefixed with the `/` introduced with PR #5761. 
--- common/config_test.go | 196 ++++++++++++++++++++++++++---------------- 1 file changed, 120 insertions(+), 76 deletions(-) diff --git a/common/config_test.go b/common/config_test.go index af26aeefa..55fb69a85 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -95,67 +95,116 @@ func TestValidatedURL(t *testing.T) { } } +func GetNativePathToTestFixtures(t *testing.T) string { + const path = "./test-fixtures" + res, err := filepath.Abs(path) + if err != nil { + t.Fatalf("err converting test-fixtures path into an absolute path : %s", err) + } + return res +} + +func GetPortablePathToTestFixtures(t *testing.T) string { + res := GetNativePathToTestFixtures(t) + return filepath.ToSlash(res) +} + func TestDownloadableURL_WindowsFiles(t *testing.T) { if runtime.GOOS == "windows" { + portablepath := GetPortablePathToTestFixtures(t) + nativepath := GetNativePathToTestFixtures(t) + dirCases := []struct { InputString string OutputURL string ErrExpected bool }{ // TODO: add different directories { - "C:\\Temp\\SomeDir\\myfile.txt", - "file:///C:/Temp/SomeDir/myfile.txt", + fmt.Sprintf("%s\\SomeDir\\myfile.txt", nativepath), + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), false, }, - { // need windows drive - "\\Temp\\SomeDir\\myfile.txt", - "", - true, - }, - { // need windows drive - "/Temp/SomeDir/myfile.txt", - "", - true, - }, - { // UNC paths; why not? 
- "\\\\?\\c:\\Temp\\SomeDir\\myfile.txt", - "", - true, - }, - { - "file:///C:\\Temp\\SomeDir\\myfile.txt", - "file:///c:/Temp/SomeDir/myfile.txt", + { // without the drive makes this native path a relative file:// uri + "test-fixtures\\SomeDir\\myfile.txt", + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), false, }, - { - "file:///c:/Temp/Somedir/myfile.txt", - "file:///c:/Temp/SomeDir/myfile.txt", + { // without the drive makes this native path a relative file:// uri + "test-fixtures/SomeDir/myfile.txt", + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // UNC paths being promoted to smb:// uri scheme. + fmt.Sprintf("\\\\localhost\\C$\\%s\\SomeDir\\myfile.txt", nativepath), + fmt.Sprintf("smb://localhost/C$/%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Absolute uri (incorrect slash type) + fmt.Sprintf("file:///%s\\SomeDir\\myfile.txt", nativepath), + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Absolute uri (existing and mis-spelled) + fmt.Sprintf("file:///%s/Somedir/myfile.txt", nativepath), + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Absolute path (non-existing) + "\\absolute\\path\\to\\non-existing\\file.txt", + "file:///absolute/path/to/non-existing/file.txt", + false, + }, + { // Absolute paths (existing) + fmt.Sprintf("%s/SomeDir/myfile.txt", nativepath), + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Relative path (non-existing) + "./nonexisting/relative/path/to/file.txt", + "file://./nonexisting/relative/path/to/file.txt", + false, + }, + { // Relative path (existing) + "./test-fixtures/SomeDir/myfile.txt", + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Absolute uri (existing and with `/` prefix) + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), + false, + }, + { // Absolute uri 
(non-existing and with `/` prefix) + "file:///path/to/non-existing/file.txt", + "file:///path/to/non-existing/file.txt", + false, + }, + { // Absolute uri (non-existing and missing `/` prefix) + "file://path/to/non-existing/file.txt", + "file://path/to/non-existing/file.txt", + false, + }, + { // Absolute uri and volume (non-existing and with `/` prefix) + "file:///T:/path/to/non-existing/file.txt", + "file:///T:/path/to/non-existing/file.txt", + false, + }, + { // Absolute uri and volume (non-existing and missing `/` prefix) + "file://T:/path/to/non-existing/file.txt", + "file://T:/path/to/non-existing/file.txt", false, }, } - // create absolute-pathed tempfile to play with - err := os.Mkdir("C:\\Temp\\SomeDir", 0755) - if err != nil { - t.Fatalf("err creating test dir: %s", err) - } - fi, err := os.Create("C:\\Temp\\SomeDir\\myfile.txt") - if err != nil { - t.Fatalf("err creating test file: %s", err) - } - fi.Close() - defer os.Remove("C:\\Temp\\SomeDir\\myfile.txt") - defer os.Remove("C:\\Temp\\SomeDir") - // Run through test cases to make sure they all parse correctly - for _, tc := range dirCases { + for idx, tc := range dirCases { u, err := DownloadableURL(tc.InputString) if (err != nil) != tc.ErrExpected { - t.Fatalf("Test Case failed: Expected err = %#v, err = %#v, input = %s", - tc.ErrExpected, err, tc.InputString) + t.Fatalf("Test Case %d failed: Expected err = %#v, err = %#v, input = %s", + idx, tc.ErrExpected, err, tc.InputString) } if u != tc.OutputURL { - t.Fatalf("Test Case failed: Expected %s but received %s from input %s", - tc.OutputURL, u, tc.InputString) + t.Fatalf("Test Case %d failed: Expected %s but received %s from input %s", + idx, tc.OutputURL, u, tc.InputString) } } } @@ -177,6 +226,12 @@ func TestDownloadableURL_FilePaths(t *testing.T) { tfPath = filepath.Clean(tfPath) filePrefix := "file://" + // If we're running windows, then absolute URIs are `/`-prefixed. 
+ platformPrefix := "" + if runtime.GOOS == "windows" { + platformPrefix = "/" + } + // Relative filepath. We run this test in a func so that // the defers run right away. func() { @@ -197,8 +252,9 @@ func TestDownloadableURL_FilePaths(t *testing.T) { t.Fatalf("err: %s", err) } - expected := fmt.Sprintf("%s%s", + expected := fmt.Sprintf("%s%s%s", filePrefix, + platformPrefix, strings.Replace(tfPath, `\`, `/`, -1)) if u != expected { t.Fatalf("unexpected: %#v != %#v", u, expected) @@ -206,21 +262,22 @@ func TestDownloadableURL_FilePaths(t *testing.T) { }() // Test some cases with and without a schema prefix - for _, prefix := range []string{"", filePrefix} { + for _, prefix := range []string{"", filePrefix + platformPrefix} { // Nonexistent file _, err = DownloadableURL(prefix + "i/dont/exist") if err != nil { t.Fatalf("err: %s", err) } - // Good file + // Good file (absolute) u, err := DownloadableURL(prefix + tfPath) if err != nil { t.Fatalf("err: %s", err) } - expected := fmt.Sprintf("%s%s", + expected := fmt.Sprintf("%s%s%s", filePrefix, + platformPrefix, strings.Replace(tfPath, `\`, `/`, -1)) if u != expected { t.Fatalf("unexpected: %s != %s", u, expected) @@ -229,38 +286,25 @@ func TestDownloadableURL_FilePaths(t *testing.T) { } func test_FileExistsLocally(t *testing.T) { - if runtime.GOOS == "windows" { - dirCases := []struct { - Input string - Output bool - }{ - // file exists locally - {"file:///C:/Temp/SomeDir/myfile.txt", true}, - // file is not supposed to exist locally - {"https://myfile.iso", true}, - // file does not exist locally - {"file:///C/i/dont/exist", false}, - } - // create absolute-pathed tempfile to play with - err := os.Mkdir("C:\\Temp\\SomeDir", 0755) - if err != nil { - t.Fatalf("err creating test dir: %s", err) - } - fi, err := os.Create("C:\\Temp\\SomeDir\\myfile.txt") - if err != nil { - t.Fatalf("err creating test file: %s", err) - } - fi.Close() - defer os.Remove("C:\\Temp\\SomeDir\\myfile.txt") - defer 
os.Remove("C:\\Temp\\SomeDir") + portablepath := GetPortablePathToTestFixtures(t) - // Run through test cases to make sure they all parse correctly - for _, tc := range dirCases { - fileOK := FileExistsLocally(tc.Input) - if !fileOK { - t.Fatalf("Test Case failed: Expected %#v, received = %#v, input = %s", - tc.Output, fileOK, tc.Input) - } + dirCases := []struct { + Input string + Output bool + }{ + // file exists locally + {fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), true}, + // file is not supposed to exist locally + {"https://myfile.iso", true}, + // file does not exist locally + {"file:///C/i/dont/exist", false}, + } + // Run through test cases to make sure they all parse correctly + for _, tc := range dirCases { + fileOK := FileExistsLocally(tc.Input) + if !fileOK { + t.Fatalf("Test Case failed: Expected %#v, received = %#v, input = %s", + tc.Output, fileOK, tc.Input) } } } From 8a102a42a0131d8e02f35d2362f8038c101718c2 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:09:53 -0600 Subject: [PATCH 061/251] gofmt -w on common/config{,_test}.go --- common/config.go | 2 +- common/config_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/config.go b/common/config.go index a3723b40a..a8ec7d16a 100644 --- a/common/config.go +++ b/common/config.go @@ -6,9 +6,9 @@ import ( "os" "path/filepath" "regexp" + "runtime" "strings" "time" - "runtime" ) // PackerKeyEnv is used to specify the key interval (delay) between keystrokes diff --git a/common/config_test.go b/common/config_test.go index 55fb69a85..35697291d 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -5,9 +5,9 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "strings" "testing" - "runtime" ) func TestChooseString(t *testing.T) { From 7cd5d576d9ab2fa82d4144a69ae381415e87360c Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:18:55 -0600 Subject: [PATCH 062/251] Updated common/config.go's 
FileExistsLocally implementation to use the LocalDownloader interface for determining the real file path. --- common/config.go | 53 +++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/common/config.go b/common/config.go index a8ec7d16a..e3fd4baf8 100644 --- a/common/config.go +++ b/common/config.go @@ -177,27 +177,38 @@ func ValidatedURL(original string) (string, error) { func FileExistsLocally(original string) bool { // original should be something like file://C:/my/path.iso + u, _ := url.Parse(original) - fileURL, _ := url.Parse(original) - fileExists := false - - if fileURL.Scheme == "file" { - // on windows, correct URI is file:///c:/blah/blah.iso. - // url.Parse will pull out the scheme "file://" and leave the path as - // "/c:/blah/blah/iso". Here we remove this forward slash on absolute - // Windows file URLs before processing - // see https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/ - // for more info about valid windows URIs - filePath := fileURL.Path - if runtime.GOOS == "windows" && len(filePath) > 0 && filePath[0] == '/' { - filePath = filePath[1:] - } - _, err := os.Stat(filePath) - if err != nil { - return fileExists - } else { - fileExists = true - } + // First create a dummy downloader so we can figure out which + // protocol to use. + cli := NewDownloadClient(&DownloadConfig{}) + d, ok := cli.config.DownloaderMap[u.Scheme] + if !ok { + return false } - return fileExists + + // Check to see that it's got a Local way of doing things. + local, ok := d.(LocalDownloader) + if !ok { + return false + } + + // Figure out where we're at. + wd, err := os.Getwd() + if err != nil { + return false + } + + // Now figure out the real path to the file. + realpath, err := local.toPath(wd, *u) + if err != nil { + return false + } + + // Finally we can seek the truth via os.Stat. 
+ _, err = os.Stat(realpath) + if err != nil { + return false + } + return true } From 41f4dc3f3dfe41c71578b0a2b231abe7c0ac4f38 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:33:44 -0600 Subject: [PATCH 063/251] umm...gofmt -w on common/config{,_test}.go from linux instead of windows(?) --- common/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/config.go b/common/config.go index e3fd4baf8..2ad5b528e 100644 --- a/common/config.go +++ b/common/config.go @@ -58,7 +58,7 @@ func SupportedURL(u *url.URL) bool { // Iterate through each downloader to see if a protocol was found. ok := false - for scheme, _ := range cli.config.DownloaderMap { + for scheme := range cli.config.DownloaderMap { if strings.ToLower(u.Scheme) == strings.ToLower(scheme) { ok = true } From f9572cb2447a6d334cc559545d94987a8b5a26b4 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:48:20 -0600 Subject: [PATCH 064/251] Fixed a bug on linux related to forgetting to check the platform for the forward-slash prefix. --- common/config.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/common/config.go b/common/config.go index 2ad5b528e..fa9dba55b 100644 --- a/common/config.go +++ b/common/config.go @@ -123,7 +123,10 @@ func DownloadableURL(original string) (string, error) { // Otherwise, check if it was originally an absolute path, and fix it if so. if strings.HasPrefix(original, "/") { - return fmt.Sprintf("file:///%s", result), nil + if runtime.GOOS == "windows" { + return fmt.Sprintf("file:///%s", result), nil + } + return fmt.Sprintf("file://%s", result), nil } // Anything left should be a non-existent relative path. So fix it up here. From 97fc9c02a50bb264d1523434524f593e3d155369 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 18 Jan 2018 23:58:24 -0600 Subject: [PATCH 065/251] Grr...missed the case that actually mattered on linux. 
--- common/config.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/common/config.go b/common/config.go index fa9dba55b..6850005d7 100644 --- a/common/config.go +++ b/common/config.go @@ -72,7 +72,12 @@ func SupportedURL(u *url.URL) bool { // and so DownloadableURL will return "file://local/file.iso" // No other transformations are done to the path. func DownloadableURL(original string) (string, error) { - var result string + var absPrefix, result string + + absPrefix = "" + if runtime.GOOS == "windows" { + absPrefix = "/" + } // Check that the user specified a UNC path, and promote it to an smb:// uri. if strings.HasPrefix(original, "\\\\") && len(original) > 2 && original[2] != '?' { @@ -118,15 +123,12 @@ func DownloadableURL(original string) (string, error) { } result = filepath.Clean(result) - return fmt.Sprintf("file:///%s", filepath.ToSlash(result)), nil + return fmt.Sprintf("file://%s%s", absPrefix, filepath.ToSlash(result)), nil } // Otherwise, check if it was originally an absolute path, and fix it if so. if strings.HasPrefix(original, "/") { - if runtime.GOOS == "windows" { - return fmt.Sprintf("file:///%s", result), nil - } - return fmt.Sprintf("file://%s", result), nil + return fmt.Sprintf("file://%s%s", absPrefix, result), nil } // Anything left should be a non-existent relative path. So fix it up here. 
From 543caf3ec575621dd9210a6bd33116193d9da934 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 12 Jan 2018 14:12:15 -0800 Subject: [PATCH 066/251] WIP OCI Classic builder --- builder/oracle/classic/access_config.go | 4 + builder/oracle/classic/artifact.go | 34 ++++++++ builder/oracle/classic/builder.go | 87 +++++++++++++++++++ builder/oracle/classic/config.go | 44 ++++++++++ .../{oci => common}/step_ssh_key_pair.go | 8 +- builder/oracle/oci/builder.go | 3 +- 6 files changed, 175 insertions(+), 5 deletions(-) create mode 100644 builder/oracle/classic/access_config.go create mode 100644 builder/oracle/classic/artifact.go create mode 100644 builder/oracle/classic/builder.go create mode 100644 builder/oracle/classic/config.go rename builder/oracle/{oci => common}/step_ssh_key_pair.go (94%) diff --git a/builder/oracle/classic/access_config.go b/builder/oracle/classic/access_config.go new file mode 100644 index 000000000..99483ceec --- /dev/null +++ b/builder/oracle/classic/access_config.go @@ -0,0 +1,4 @@ +package classic + +type AccessConfig struct { +} diff --git a/builder/oracle/classic/artifact.go b/builder/oracle/classic/artifact.go new file mode 100644 index 000000000..ff01b186f --- /dev/null +++ b/builder/oracle/classic/artifact.go @@ -0,0 +1,34 @@ +package classic + +// Artifact is an artifact implementation that contains a built Custom Image. +type Artifact struct { +} + +// BuilderId uniquely identifies the builder. +func (a *Artifact) BuilderId() string { + return BuilderId +} + +// Files lists the files associated with an artifact. We don't have any files +// as the custom image is stored server side. +func (a *Artifact) Files() []string { + return nil +} + +// Id returns the OCID of the associated Image. +func (a *Artifact) Id() string { + return "" +} + +func (a *Artifact) String() string { + return "" +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +// Destroy deletes the custom image associated with the artifact. 
+func (a *Artifact) Destroy() error { + return nil +} diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go new file mode 100644 index 000000000..4b86c449c --- /dev/null +++ b/builder/oracle/classic/builder.go @@ -0,0 +1,87 @@ +package classic + +import ( + "fmt" + "log" + + ocommon "github.com/hashicorp/packer/builder/oracle/common" + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +// BuilderId uniquely identifies the builder +const BuilderId = "packer.oracle.classic" + +// Builder is a builder implementation that creates Oracle OCI custom images. +type Builder struct { + config *Config + runner multistep.Runner +} + +func (b *Builder) Prepare(rawConfig ...interface{}) ([]string, error) { + config, err := NewConfig(rawConfig...) + if err != nil { + return nil, err + } + b.config = config + + return nil, nil +} + +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + // Populate the state bag + state := new(multistep.BasicStateBag) + state.Put("config", b.config) + state.Put("hook", hook) + state.Put("ui", ui) + + // Build the steps + steps := []multistep.Step{ + &ocommon.StepKeyPair{ + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), + PrivateKeyFile: b.config.Comm.SSHPrivateKey, + }, + &stepCreateInstance{}, + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: commHost, + SSHConfig: SSHConfig( + b.config.Comm.SSHUsername, + b.config.Comm.SSHPassword), + }, + &common.StepProvision{}, + &stepSnapshot{}, + } + + // Run the steps + b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) + b.runner.Run(state) + + // If there was an error, return that + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + /* + // Build the artifact and return it + artifact := &Artifact{ + 
Image: state.Get("image").(client.Image), + Region: b.config.AccessCfg.Region, + driver: driver, + } + + return artifact, nil + */ + return nil, nil +} + +// Cancel terminates a running build. +func (b *Builder) Cancel() { + if b.runner != nil { + log.Println("Cancelling the step runner...") + b.runner.Cancel() + } +} diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go new file mode 100644 index 000000000..6a3c05fc8 --- /dev/null +++ b/builder/oracle/classic/config.go @@ -0,0 +1,44 @@ +package classic + +import ( + "fmt" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/template/interpolate" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` + + Access *AccessConfig + + // Access config overrides + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + IdentityDomain string `mapstructure:"identity_domain"` + APIEndpoint string `mapstructure:"api_endpoint"` + + // Image + Shape string `mapstructure:"shape"` + ImageList string `json:"image_list"` + + ctx interpolate.Context +} + +func NewConfig(raws ...interface{}) (*Config, error) { + c := &Config{} + + // Decode from template + err := config.Decode(c, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &c.ctx, + }, raws...) 
+ if err != nil { + return nil, fmt.Errorf("Failed to mapstructure Config: %+v", err) + } + + return c, nil +} diff --git a/builder/oracle/oci/step_ssh_key_pair.go b/builder/oracle/common/step_ssh_key_pair.go similarity index 94% rename from builder/oracle/oci/step_ssh_key_pair.go rename to builder/oracle/common/step_ssh_key_pair.go index 9fa83c17d..c545a09e2 100644 --- a/builder/oracle/oci/step_ssh_key_pair.go +++ b/builder/oracle/common/step_ssh_key_pair.go @@ -1,4 +1,4 @@ -package oci +package common import ( "context" @@ -16,13 +16,13 @@ import ( "golang.org/x/crypto/ssh" ) -type stepKeyPair struct { +type StepKeyPair struct { Debug bool DebugKeyPath string PrivateKeyFile string } -func (s *stepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *StepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) if s.PrivateKeyFile != "" { @@ -112,6 +112,6 @@ func (s *stepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep return multistep.ActionContinue } -func (s *stepKeyPair) Cleanup(state multistep.StateBag) { +func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // Nothing to do } diff --git a/builder/oracle/oci/builder.go b/builder/oracle/oci/builder.go index 6a2f6efef..07d41b756 100644 --- a/builder/oracle/oci/builder.go +++ b/builder/oracle/oci/builder.go @@ -6,6 +6,7 @@ import ( "fmt" "log" + ocommon "github.com/hashicorp/packer/builder/oracle/common" client "github.com/hashicorp/packer/builder/oracle/oci/client" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" @@ -50,7 +51,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ - &stepKeyPair{ + &ocommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("oci_%s.pem", b.config.PackerBuildName), PrivateKeyFile: b.config.Comm.SSHPrivateKey, From 
9e8d845c03e7539c18bb8ad3d2ce357cd565382c Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 12 Jan 2018 14:19:13 -0800 Subject: [PATCH 067/251] create instance reservation --- builder/oracle/classic/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 4b86c449c..5fe719567 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -44,6 +44,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), PrivateKeyFile: b.config.Comm.SSHPrivateKey, }, + &stepCreateIPReservation{}, &stepCreateInstance{}, &communicator.StepConnect{ Config: &b.config.Comm, From 3bf431a4238b8c1f38679214acc0f6a5e9881bc1 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 12 Jan 2018 15:44:40 -0800 Subject: [PATCH 068/251] construct OCI client --- builder/oracle/classic/builder.go | 25 +- builder/oracle/classic/config.go | 7 + .../go-oracle-terraform/CHANGELOG.md | 134 +++++++ .../hashicorp/go-oracle-terraform/GNUmakefile | 41 ++ .../hashicorp/go-oracle-terraform/LICENSE | 373 ++++++++++++++++++ .../hashicorp/go-oracle-terraform/README.md | 108 +++++ vendor/vendor.json | 6 + 7 files changed, 692 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/GNUmakefile create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/LICENSE create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/README.md diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 5fe719567..b8af96345 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -4,9 +4,10 @@ import ( "fmt" "log" - ocommon "github.com/hashicorp/packer/builder/oracle/common" + "github.com/hashicorp/go-cleanhttp" + 
"github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/go-oracle-terraform/opc" "github.com/hashicorp/packer/common" - "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) @@ -31,14 +32,33 @@ func (b *Builder) Prepare(rawConfig ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + + httpClient := cleanhttp.DefaultClient() + config := &opc.Config{ + Username: opc.String(b.config.Username), + Password: opc.String(b.config.Password), + IdentityDomain: opc.String(b.config.IdentityDomain), + APIEndpoint: b.config.apiEndpointURL, + LogLevel: opc.LogDebug, + // Logger: # Leave blank to use the default logger, or provide your own + HTTPClient: httpClient, + } + // Create the Compute Client + client, err := compute.NewComputeClient(config) + if err != nil { + return nil, fmt.Errorf("Error creating OPC Compute Client: %s", err) + } + // Populate the state bag state := new(multistep.BasicStateBag) state.Put("config", b.config) state.Put("hook", hook) state.Put("ui", ui) + state.Put("client", client) // Build the steps steps := []multistep.Step{ + /* &ocommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), @@ -55,6 +75,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &common.StepProvision{}, &stepSnapshot{}, + */ } // Run the steps diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 6a3c05fc8..050f78d91 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -2,6 +2,7 @@ package classic import ( "fmt" + "net/url" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" @@ -20,6 +21,7 @@ type Config struct { Password string `mapstructure:"password"` IdentityDomain string `mapstructure:"identity_domain"` 
APIEndpoint string `mapstructure:"api_endpoint"` + apiEndpointURL *url.URL // Image Shape string `mapstructure:"shape"` @@ -40,5 +42,10 @@ func NewConfig(raws ...interface{}) (*Config, error) { return nil, fmt.Errorf("Failed to mapstructure Config: %+v", err) } + c.apiEndpointURL, err = url.Parse(c.APIEndpoint) + if err != nil { + return nil, fmt.Errorf("Error parsing API Endpoint: %s", err) + } + return c, nil } diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/CHANGELOG.md b/vendor/github.com/hashicorp/go-oracle-terraform/CHANGELOG.md new file mode 100644 index 000000000..70e5b3c31 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/CHANGELOG.md @@ -0,0 +1,134 @@ +## 0.6.6 (January 11, 2018) + +* compute: Create and delete machine images [GH-101] + +## 0.6.5 (January 8, 2018) + +* compute: Orchestration failures should explicitly tell the user why it failed [GH-100] + +## 0.6.4 (Decemeber 20, 2017) + +* compute: Added suspend functionality to orchestrated instances [GH-99] + +## 0.6.3 (December 13, 2017) + +* storage: Added remove header option to storage objects and containers [GH-96] + +## 0.6.2 (November 28, 2017) + +* client: Added a UserAgent to the Client [GH-98] + +## 0.6.1 (Novemeber 26, 2017) + +* compute: Added is_default_gateway to network attributes for instances [GH-97] + + +## 0.6.0 (November 10, 2017) + +* compute: Added is_default_gateway to network attributes for instances [GH-90] + +* compute: Added the orchestration resource, specifically for instance creation [GH-91] + +## 0.5.1 (October 5, 2017) + +* java: Fixed subscription_type field + +## 0.5.0 (October 5, 2017) + +* java: Added more fields to java service instance [GH-89] + +## 0.4.0 (September 14, 2017) + +* database: Add utility resources [GH-87] + +* compute: Increase storage volume snapshot create timeout [GH-88] + +## 0.3.4 (August 16, 2017) + +* storage_volumes: Actually capture errors during a storage volume create 
([#86](https://github.com/hashicorp/go-oracle-terraform/issues/86)) + +## 0.3.3 (August 10, 2017) + +* Add `ExposedHeaders` to storage containers ([#85](https://github.com/hashicorp/go-oracle-terraform/issues/85)) + +* Fixed `AllowedOrigins` in storage containers ([#85](https://github.com/hashicorp/go-oracle-terraform/issues/85)) + +## 0.3.2 (August 7, 2017) + +* Add `id` for storage objects ([#84](https://github.com/hashicorp/go-oracle-terraform/issues/84)) + +## 0.3.1 (August 7, 2017) + +* Update tests for Database parameter changes ([#83](https://github.com/hashicorp/go-oracle-terraform/issues/83)) + +## 0.3.0 (August 7, 2017) + + * Add JaaS Service Instances ([#82](https://github.com/hashicorp/go-oracle-terraform/issues/82)) + + * Add storage objects ([#81](https://github.com/hashicorp/go-oracle-terraform/issues/81)) + +## 0.2.0 (July 27, 2017) + + * service_instance: Switches yes/no strings to bool in input struct and then converts back to strings for ease of use on user end ([#80](https://github.com/hashicorp/go-oracle-terraform/issues/80)) + +## 0.1.9 (July 20, 2017) + + * service_instance: Update delete retry count ([#79](https://github.com/hashicorp/go-oracle-terraform/issues/79)) + + * service_instance: Add additional fields ([#79](https://github.com/hashicorp/go-oracle-terraform/issues/79)) + +## 0.1.8 (July 19, 2017) + + * storage_volumes: Add SSD support ([#78](https://github.com/hashicorp/go-oracle-terraform/issues/78)) + +## 0.1.7 (July 19, 2017) + + * database: Adds the Oracle Database Cloud to the available sdks. 
([#77](https://github.com/hashicorp/go-oracle-terraform/issues/77)) + + * database: Adds Service Instances to the database sdk ([#77](https://github.com/hashicorp/go-oracle-terraform/issues/77)) + +## 0.1.6 (July 18, 2017) + + * opc: Add timeouts to instance and storage inputs ([#75](https://github.com/hashicorp/go-oracle-terraform/issues/75)) + +## 0.1.5 (July 5, 2017) + + * storage: User must pass in Storage URL to CRUD resources ([#74](https://github.com/hashicorp/go-oracle-terraform/issues/74)) + +## 0.1.4 (June 30, 2017) + + * opc: Fix infinite loop around auth token exceeding it's 25 minute duration. ([#73](https://github.com/hashicorp/go-oracle-terraform/issues/73)) + +## 0.1.3 (June 30, 2017) + + * opc: Add additional logs instance logs ([#72](https://github.com/hashicorp/go-oracle-terraform/issues/72)) + + * opc: Increase instance creation and deletion timeout ([#72](https://github.com/hashicorp/go-oracle-terraform/issues/72)) + +## 0.1.2 (June 30, 2017) + + +FEATURES: + + * opc: Add image snapshots ([#67](https://github.com/hashicorp/go-oracle-terraform/issues/67)) + + * storage: Storage containers have been added ([#70](https://github.com/hashicorp/go-oracle-terraform/issues/70)) + + +IMPROVEMENTS: + + * opc: Refactored client to be generic for multiple Oracle api endpoints ([#68](https://github.com/hashicorp/go-oracle-terraform/issues/68)) + + * opc: Instance creation retries when an instance enters a deleted state ([#71](https://github.com/hashicorp/go-oracle-terraform/issues/71)) + +## 0.1.1 (May 31, 2017) + +IMPROVEMENTS: + + * opc: Add max_retries capabilities ([#66](https://github.com/hashicorp/go-oracle-terraform/issues/66)) + +## 0.1.0 (May 25, 2017) + +BACKWARDS INCOMPATIBILITIES / NOTES: + + * Initial Release of OPC SDK diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/GNUmakefile b/vendor/github.com/hashicorp/go-oracle-terraform/GNUmakefile new file mode 100644 index 000000000..9c5a0e2f2 --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-oracle-terraform/GNUmakefile @@ -0,0 +1,41 @@ +TEST?=$$(go list ./... |grep -v 'vendor') +GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) + +test: fmtcheck errcheck + go test -i $(TEST) || exit 1 + echo $(TEST) | \ + xargs -t -n4 go test $(TESTARGS) -timeout=60m -parallel=4 + +testacc: fmtcheck + ORACLE_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout 120m + +testrace: fmtcheck + ORACLE_ACC= go test -race $(TEST) $(TESTARGS) + +cover: + @go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \ + go get -u golang.org/x/tools/cmd/cover; \ + fi + go test $(TEST) -coverprofile=coverage.out + go tool cover -html=coverage.out + rm coverage.out + +vet: + @echo "go vet ." + @go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(GOFMT_FILES) + +fmtcheck: + @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" + +errcheck: + @sh -c "'$(CURDIR)/scripts/errcheck.sh'" + +.PHONY: tools build test testacc testrace cover vet fmt fmtcheck errcheck diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE b/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/README.md b/vendor/github.com/hashicorp/go-oracle-terraform/README.md new file mode 100644 index 000000000..0accc2c32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/README.md @@ -0,0 +1,108 @@ +Oracle SDK for Terraform +=========================================== + +**Note:** This SDK is _not_ meant to be a comprehensive SDK for Oracle Cloud. This is meant to be used solely with Terraform. + +OPC Config +---------- + +To create the Oracle clients, a populated configuration struct is required. +The config struct holds the following fields: + +* `Username` - (`*string`) The Username used to authenticate to Oracle Public Cloud. +* `Password` - (`*string`) The Password used to authenticate to Oracle Public Cloud. +* `IdentityDomain` - (`*string`) The identity domain for Oracle Public Cloud. +* `APIEndpoint` - (`*url.URL`) The API Endpoint provided by Oracle Public Cloud. +* `LogLevel` - (`LogLevelType`) Defaults to `opc.LogOff`, can be either `opc.LogOff` or `opc.LogDebug`. +* `Logger` - (`Logger`) Must satisfy the generic `Logger` interface. Defaults to `ioutil.Discard` for the `LogOff` loglevel, and `os.Stderr` for the `LogDebug` loglevel. +* `HTTPClient` - (`*http.Client`) Defaults to generic HTTP Client if unspecified. + +Oracle Compute Client +---------------------- +The Oracle Compute Client requires an OPC Config object to be populated in order to create the client. 
+ +Full example to create an OPC Compute instance: +```go +package main + +import ( + "fmt" + "net/url" + "github.com/hashicorp/go-oracle-terraform/opc" + "github.com/hashicorp/go-oracle-terraform/compute" +) + +func main() { + apiEndpoint, err := url.Parse("myAPIEndpoint") + if err != nil { + fmt.Errorf("Error parsing API Endpoint: %s", err) + } + + config := &opc.Config{ + Username: opc.String("myusername"), + Password: opc.String("mypassword"), + IdentityDomain: opc.String("myidentitydomain"), + APIEndpoint: apiEndpoint, + LogLevel: opc.LogDebug, + // Logger: # Leave blank to use the default logger, or provide your own + // HTTPClient: # Leave blank to use default HTTP Client, or provider your own + } + // Create the Compute Client + client, err := compute.NewComputeClient(config) + if err != nil { + fmt.Errorf("Error creating OPC Compute Client: %s", err) + } + // Create instances client + instanceClient := client.Instances() + + // Instances Input + input := &compute.CreateInstanceInput{ + Name: "test-instance", + Label: "test", + Shape: "oc3", + ImageList: "/oracle/public/oel_6.7_apaas_16.4.5_1610211300", + Storage: nil, + BootOrder: nil, + SSHKeys: []string{}, + Attributes: map[string]interface{}{}, + } + + // Create the instance + instance, err := instanceClient.CreateInstance(input) + if err != nil { + fmt.Errorf("Error creating instance: %s", err) + } + fmt.Printf("Instance Created: %#v", instance) +} +``` + +Please refer to inline documentation for each resource that the compute client provides. + +Running the SDK Integration Tests +----------------------------- + +To authenticate with the Oracle Compute Cloud the following credentails must be set in the following environment variables: + +- `OPC_ENDPOINT` - Endpoint provided by Oracle Public Cloud (e.g. 
https://api-z13.compute.em2.oraclecloud.com/\) +- `OPC_USERNAME` - Username for Oracle Public Cloud +- `OPC_PASSWORD` - Password for Oracle Public Cloud +- `OPC_IDENTITY_DOMAIN` - Identity domain for Oracle Public Cloud + + +The Integration tests can be ran with the following command: +```sh +$ make testacc +``` + +Isolating a single SDK package can be done via the `TEST` environment variable +```sh +$ make testacc TEST=./compute +``` + +Isolating a single test within a package can be done via the `TESTARGS` environment variable +```sh +$ make testacc TEST=./compute TESTARGS='-run=TestAccIPAssociationLifeCycle' +``` + +Tests are ran with logs being sent to `ioutil.Discard` by default. +Display debug logs inside of tests by setting the `ORACLE_LOG` environment variable to any value. diff --git a/vendor/vendor.json b/vendor/vendor.json index e8c3fb6f1..2f8f8307d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -790,6 +790,12 @@ "path": "github.com/hashicorp/go-multierror", "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, + { + "checksumSHA1": "Nf2Gdn9M1KlUS3sovKfymO1VJF4=", + "path": "github.com/hashicorp/go-oracle-terraform", + "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", + "revisionTime": "2018-01-11T20:31:13Z" + }, { "checksumSHA1": "ErJHGU6AVPZM9yoY/xV11TwSjQs=", "path": "github.com/hashicorp/go-retryablehttp", From 967b858fc3262b00ab95433ce1bf2f598991b68d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 12 Jan 2018 15:46:34 -0800 Subject: [PATCH 069/251] add go-oracle client --- .../go-oracle-terraform/client/client.go | 245 ++++++ .../go-oracle-terraform/client/logging.go | 28 + .../go-oracle-terraform/client/version.go | 35 + .../go-oracle-terraform/compute/acl.go | 138 +++ .../compute/authentication.go | 34 + .../compute/compute_client.go | 167 ++++ .../compute/compute_resource_client.go | 113 +++ .../go-oracle-terraform/compute/image_list.go | 154 ++++ .../compute/image_list_entries.go | 122 +++ 
.../go-oracle-terraform/compute/instances.go | 788 ++++++++++++++++++ .../compute/ip_address_associations.go | 152 ++++ .../compute/ip_address_prefix_set.go | 135 +++ .../compute/ip_address_reservations.go | 190 +++++ .../compute/ip_associations.go | 118 +++ .../compute/ip_network_exchange.go | 99 +++ .../compute/ip_networks.go | 186 +++++ .../compute/ip_reservations.go | 147 ++++ .../compute/machine_images.go | 143 ++++ .../compute/orchestration.go | 451 ++++++++++ .../go-oracle-terraform/compute/routes.go | 153 ++++ .../go-oracle-terraform/compute/sec_rules.go | 193 +++++ .../compute/security_applications.go | 150 ++++ .../compute/security_associations.go | 95 +++ .../compute/security_ip_lists.go | 113 +++ .../compute/security_lists.go | 131 +++ .../compute/security_protocols.go | 187 +++++ .../compute/security_rules.go | 266 ++++++ .../go-oracle-terraform/compute/snapshots.go | 217 +++++ .../go-oracle-terraform/compute/ssh_keys.go | 124 +++ .../compute/storage_volume_attachments.go | 178 ++++ .../compute/storage_volume_snapshots.go | 251 ++++++ .../compute/storage_volumes.go | 389 +++++++++ .../go-oracle-terraform/compute/test_utils.go | 119 +++ .../compute/virtual_nic.go | 52 ++ .../compute/virtual_nic_sets.go | 154 ++++ .../go-oracle-terraform/opc/config.go | 22 + .../go-oracle-terraform/opc/convert.go | 9 + .../go-oracle-terraform/opc/errors.go | 12 + .../go-oracle-terraform/opc/logger.go | 70 ++ vendor/vendor.json | 18 + 40 files changed, 6348 insertions(+) create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/client/client.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/client/logging.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/client/version.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go create mode 100644 
vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_client.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_resource_client.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go create mode 100644 
vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/client/client.go b/vendor/github.com/hashicorp/go-oracle-terraform/client/client.go new file mode 100644 index 000000000..4188d111b --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/client/client.go @@ -0,0 +1,245 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/hashicorp/go-oracle-terraform/opc" +) + +const DEFAULT_MAX_RETRIES = 1 +const USER_AGENT_HEADER = "User-Agent" + +var ( + // defaultUserAgent builds a string containing the Go version, system archityecture and OS, + // and the go-autorest version. 
+ defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-oracle-terraform/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Version(), + ) +) + +// Client represents an authenticated compute client, with compute credentials and an api client. +type Client struct { + IdentityDomain *string + UserName *string + Password *string + APIEndpoint *url.URL + httpClient *http.Client + MaxRetries *int + UserAgent *string + logger opc.Logger + loglevel opc.LogLevelType +} + +func NewClient(c *opc.Config) (*Client, error) { + // First create a client + client := &Client{ + IdentityDomain: c.IdentityDomain, + UserName: c.Username, + Password: c.Password, + APIEndpoint: c.APIEndpoint, + UserAgent: &defaultUserAgent, + httpClient: c.HTTPClient, + MaxRetries: c.MaxRetries, + loglevel: c.LogLevel, + } + if c.UserAgent != nil { + client.UserAgent = c.UserAgent + } + + // Setup logger; defaults to stdout + if c.Logger == nil { + client.logger = opc.NewDefaultLogger() + } else { + client.logger = c.Logger + } + + // If LogLevel was not set to something different, + // double check for env var + if c.LogLevel == 0 { + client.loglevel = opc.LogLevel() + } + + // Default max retries if unset + if c.MaxRetries == nil { + client.MaxRetries = opc.Int(DEFAULT_MAX_RETRIES) + } + + // Protect against any nil http client + if c.HTTPClient == nil { + return nil, fmt.Errorf("No HTTP client specified in config") + } + + return client, nil +} + +// Marshalls the request body and returns the resulting byte slice +// This is split out of the BuildRequestBody method so as to allow +// the developer to print a debug string of the request body if they +// should so choose. 
+func (c *Client) MarshallRequestBody(body interface{}) ([]byte, error) { + // Verify interface isnt' nil + if body == nil { + return nil, nil + } + + return json.Marshal(body) +} + +// Builds an HTTP Request that accepts a pre-marshaled body parameter as a raw byte array +// Returns the raw HTTP Request and any error occured +func (c *Client) BuildRequestBody(method, path string, body []byte) (*http.Request, error) { + // Parse URL Path + urlPath, err := url.Parse(path) + if err != nil { + return nil, err + } + + var requestBody io.ReadSeeker + if len(body) != 0 { + requestBody = bytes.NewReader(body) + } + + // Create Request + req, err := http.NewRequest(method, c.formatURL(urlPath), requestBody) + if err != nil { + return nil, err + } + // Adding UserAgent Header + req.Header.Add(USER_AGENT_HEADER, *c.UserAgent) + + return req, nil +} + +// Build a new HTTP request that doesn't marshall the request body +func (c *Client) BuildNonJSONRequest(method, path string, body io.ReadSeeker) (*http.Request, error) { + // Parse URL Path + urlPath, err := url.Parse(path) + if err != nil { + return nil, err + } + + // Create request + req, err := http.NewRequest(method, c.formatURL(urlPath), body) + if err != nil { + return nil, err + } + // Adding UserAgentHeader + req.Header.Add(USER_AGENT_HEADER, *c.UserAgent) + + return req, nil +} + +// This method executes the http.Request from the BuildRequest method. +// It is split up to add additional authentication that is Oracle API dependent. +func (c *Client) ExecuteRequest(req *http.Request) (*http.Response, error) { + // Execute request with supplied client + resp, err := c.retryRequest(req) + if err != nil { + return resp, err + } + + if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { + return resp, nil + } + + oracleErr := &opc.OracleError{ + StatusCode: resp.StatusCode, + } + + // Even though the returned body will be in json form, it's undocumented what + // fields are actually returned. 
Once we get documentation of the actual + // error fields that are possible to be returned we can have stricter error types. + if resp.Body != nil { + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + oracleErr.Message = buf.String() + } + + // Should return the response object regardless of error, + // some resources need to verify and check status code on errors to + // determine if an error actually occurs or not. + return resp, oracleErr +} + +// Allow retrying the request until it either returns no error, +// or we exceed the number of max retries +func (c *Client) retryRequest(req *http.Request) (*http.Response, error) { + // Double check maxRetries is not nil + var retries int + if c.MaxRetries == nil { + retries = DEFAULT_MAX_RETRIES + } else { + retries = *c.MaxRetries + } + + var statusCode int + var errMessage string + + for i := 0; i < retries; i++ { + resp, err := c.httpClient.Do(req) + if err != nil { + return resp, err + } + + if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { + return resp, nil + } + + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + errMessage = buf.String() + statusCode = resp.StatusCode + c.DebugLogString(fmt.Sprintf("Encountered HTTP (%d) Error: %s", statusCode, errMessage)) + c.DebugLogString(fmt.Sprintf("%d/%d retries left", i+1, retries)) + } + + oracleErr := &opc.OracleError{ + StatusCode: statusCode, + Message: errMessage, + } + + // We ran out of retries to make, return the error and response + return nil, oracleErr +} + +func (c *Client) formatURL(path *url.URL) string { + return c.APIEndpoint.ResolveReference(path).String() +} + +// Retry function +func (c *Client) WaitFor(description string, timeout time.Duration, test func() (bool, error)) error { + tick := time.Tick(1 * time.Second) + + timeoutSeconds := int(timeout.Seconds()) + + for i := 0; i < timeoutSeconds; i++ { + select { + case <-tick: + completed, err := test() + c.DebugLogString(fmt.Sprintf("Waiting for %s (%d/%ds)", 
description, i, timeoutSeconds)) + if err != nil || completed { + return err + } + } + } + return fmt.Errorf("Timeout waiting for %s", description) +} + +// Used to determine if the checked resource was found or not. +func WasNotFoundError(e error) bool { + err, ok := e.(*opc.OracleError) + if ok { + return err.StatusCode == 404 + } + return false +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/client/logging.go b/vendor/github.com/hashicorp/go-oracle-terraform/client/logging.go new file mode 100644 index 000000000..893997486 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/client/logging.go @@ -0,0 +1,28 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + + "github.com/hashicorp/go-oracle-terraform/opc" +) + +// Log a string if debug logs are on +func (c *Client) DebugLogString(str string) { + if c.loglevel != opc.LogDebug { + return + } + c.logger.Log(str) +} + +func (c *Client) DebugLogReq(req *http.Request) { + // Don't need to log this if not debugging + if c.loglevel != opc.LogDebug { + return + } + buf := new(bytes.Buffer) + buf.ReadFrom(req.Body) + c.logger.Log(fmt.Sprintf("DEBUG: HTTP %s Req %s: %s", + req.Method, req.URL.String(), buf.String())) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/client/version.go b/vendor/github.com/hashicorp/go-oracle-terraform/client/version.go new file mode 100644 index 000000000..3538fd181 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/client/version.go @@ -0,0 +1,35 @@ +package client + +import ( + "bytes" + "fmt" + "strings" + "sync" +) + +const ( + major = 0 + minor = 6 + patch = 2 + tag = "" +) + +var once sync.Once +var version string + +// Version returns the semantic version (see http://semver.org). 
+func Version() string { + once.Do(func() { + semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) + verBuilder := bytes.NewBufferString(semver) + if tag != "" && tag != "-" { + updated := strings.TrimPrefix(tag, "-") + _, err := verBuilder.WriteString("-" + updated) + if err == nil { + verBuilder = bytes.NewBufferString(semver) + } + } + version = verBuilder.String() + }) + return version +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go new file mode 100644 index 000000000..877f51c28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go @@ -0,0 +1,138 @@ +package compute + +// ACLsClient is a client for the ACLs functions of the Compute API. +type ACLsClient struct { + ResourceClient +} + +const ( + ACLDescription = "acl" + ACLContainerPath = "/network/v1/acl/" + ACLResourcePath = "/network/v1/acl" +) + +// ACLs obtains a ACLsClient which can be used to access to the +// ACLs functions of the Compute API +func (c *ComputeClient) ACLs() *ACLsClient { + return &ACLsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: ACLDescription, + ContainerPath: ACLContainerPath, + ResourceRootPath: ACLResourcePath, + }} +} + +// ACLInfo describes an existing ACL. +type ACLInfo struct { + // Description of the ACL + Description string `json:"description"` + // Indicates whether the ACL is enabled + Enabled bool `json:"enabledFlag"` + // The name of the ACL + Name string `json:"name"` + // Tags associated with the ACL + Tags []string `json:"tags"` + // Uniform Resource Identifier for the ACL + URI string `json:"uri"` +} + +// CreateACLInput defines a ACL to be created. +type CreateACLInput struct { + // Description of the ACL + // Optional + Description string `json:"description"` + + // Enables or disables the ACL. Set to true by default. + //Set this to false to disable the ACL. 
+ // Optional + Enabled bool `json:"enabledFlag"` + + // The name of the ACL to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Strings that you can use to tag the ACL. + // Optional + Tags []string `json:"tags"` +} + +// CreateACL creates a new ACL. +func (c *ACLsClient) CreateACL(createInput *CreateACLInput) (*ACLInfo, error) { + createInput.Name = c.getQualifiedName(createInput.Name) + + var aclInfo ACLInfo + if err := c.createResource(createInput, &aclInfo); err != nil { + return nil, err + } + + return c.success(&aclInfo) +} + +// GetACLInput describes the ACL to get +type GetACLInput struct { + // The name of the ACL to query for + // Required + Name string `json:"name"` +} + +// GetACL retrieves the ACL with the given name. +func (c *ACLsClient) GetACL(getInput *GetACLInput) (*ACLInfo, error) { + var aclInfo ACLInfo + if err := c.getResource(getInput.Name, &aclInfo); err != nil { + return nil, err + } + + return c.success(&aclInfo) +} + +// UpdateACLInput describes a secruity rule to update +type UpdateACLInput struct { + // Description of the ACL + // Optional + Description string `json:"description"` + + // Enables or disables the ACL. Set to true by default. + //Set this to false to disable the ACL. + // Optional + Enabled bool `json:"enabledFlag"` + + // The name of the ACL to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Strings that you can use to tag the ACL. + // Optional + Tags []string `json:"tags"` +} + +// UpdateACL modifies the properties of the ACL with the given name. 
+func (c *ACLsClient) UpdateACL(updateInput *UpdateACLInput) (*ACLInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + + var aclInfo ACLInfo + if err := c.updateResource(updateInput.Name, updateInput, &aclInfo); err != nil { + return nil, err + } + + return c.success(&aclInfo) +} + +// DeleteACLInput describes the ACL to delete +type DeleteACLInput struct { + // The name of the ACL to delete. + // Required + Name string `json:"name"` +} + +// DeleteACL deletes the ACL with the given name. +func (c *ACLsClient) DeleteACL(deleteInput *DeleteACLInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *ACLsClient) success(aclInfo *ACLInfo) (*ACLInfo, error) { + aclInfo.Name = c.getUnqualifiedName(aclInfo.Name) + return aclInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go new file mode 100644 index 000000000..6c62d7ddd --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go @@ -0,0 +1,34 @@ +package compute + +import ( + "fmt" + "time" +) + +// AuthenticationReq represents the body of an authentication request. 
+type AuthenticationReq struct { + User string `json:"user"` + Password string `json:"password"` +} + +// Get a new auth cookie for the compute client +func (c *ComputeClient) getAuthenticationCookie() error { + req := AuthenticationReq{ + User: c.getUserName(), + Password: *c.client.Password, + } + + rsp, err := c.executeRequest("POST", "/authenticate/", req) + if err != nil { + return err + } + + if len(rsp.Cookies()) == 0 { + return fmt.Errorf("No authentication cookie found in response %#v", rsp) + } + + c.client.DebugLogString("Successfully authenticated to OPC") + c.authCookie = rsp.Cookies()[0] + c.cookieIssued = time.Now() + return nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_client.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_client.go new file mode 100644 index 000000000..3719dbb15 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_client.go @@ -0,0 +1,167 @@ +package compute + +import ( + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-oracle-terraform/client" + "github.com/hashicorp/go-oracle-terraform/opc" +) + +const CMP_ACME = "/Compute-%s" +const CMP_USERNAME = "/Compute-%s/%s" +const CMP_QUALIFIED_NAME = "%s/%s" + +// Client represents an authenticated compute client, with compute credentials and an api client. 
+type ComputeClient struct { + client *client.Client + authCookie *http.Cookie + cookieIssued time.Time +} + +func NewComputeClient(c *opc.Config) (*ComputeClient, error) { + computeClient := &ComputeClient{} + client, err := client.NewClient(c) + if err != nil { + return nil, err + } + computeClient.client = client + + if err := computeClient.getAuthenticationCookie(); err != nil { + return nil, err + } + + return computeClient, nil +} + +func (c *ComputeClient) executeRequest(method, path string, body interface{}) (*http.Response, error) { + reqBody, err := c.client.MarshallRequestBody(body) + if err != nil { + return nil, err + } + + req, err := c.client.BuildRequestBody(method, path, reqBody) + if err != nil { + return nil, err + } + + debugReqString := fmt.Sprintf("HTTP %s Req (%s)", method, path) + if body != nil { + req.Header.Set("Content-Type", "application/oracle-compute-v3+json") + // Don't leak credentials in STDERR + if path != "/authenticate/" { + debugReqString = fmt.Sprintf("%s:\n %+v", debugReqString, string(reqBody)) + } + } + // Log the request before the authentication cookie, so as not to leak credentials + c.client.DebugLogString(debugReqString) + // If we have an authentication cookie, let's authenticate, refreshing cookie if need be + if c.authCookie != nil { + if time.Since(c.cookieIssued).Minutes() > 25 { + c.authCookie = nil + if err := c.getAuthenticationCookie(); err != nil { + return nil, err + } + } + req.AddCookie(c.authCookie) + } + + resp, err := c.client.ExecuteRequest(req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ComputeClient) getACME() string { + return fmt.Sprintf(CMP_ACME, *c.client.IdentityDomain) +} + +func (c *ComputeClient) getUserName() string { + return fmt.Sprintf(CMP_USERNAME, *c.client.IdentityDomain, *c.client.UserName) +} + +func (c *ComputeClient) getQualifiedACMEName(name string) string { + if name == "" { + return "" + } + if strings.HasPrefix(name, "/Compute-") && 
len(strings.Split(name, "/")) == 1 { + return name + } + return fmt.Sprintf(CMP_QUALIFIED_NAME, c.getACME(), name) +} + +// From compute_client +// GetObjectName returns the fully-qualified name of an OPC object, e.g. /identity-domain/user@email/{name} +func (c *ComputeClient) getQualifiedName(name string) string { + if name == "" { + return "" + } + if strings.HasPrefix(name, "/oracle") || strings.HasPrefix(name, "/Compute-") { + return name + } + return fmt.Sprintf(CMP_QUALIFIED_NAME, c.getUserName(), name) +} + +func (c *ComputeClient) getObjectPath(root, name string) string { + return fmt.Sprintf("%s%s", root, c.getQualifiedName(name)) +} + +// GetUnqualifiedName returns the unqualified name of an OPC object, e.g. the {name} part of /identity-domain/user@email/{name} +func (c *ComputeClient) getUnqualifiedName(name string) string { + if name == "" { + return name + } + if strings.HasPrefix(name, "/oracle") { + return name + } + if !strings.Contains(name, "/") { + return name + } + + nameParts := strings.Split(name, "/") + return strings.Join(nameParts[3:], "/") +} + +func (c *ComputeClient) unqualify(names ...*string) { + for _, name := range names { + *name = c.getUnqualifiedName(*name) + } +} + +func (c *ComputeClient) unqualifyUrl(url *string) { + var validID = regexp.MustCompile(`(\/(Compute[^\/\s]+))(\/[^\/\s]+)(\/[^\/\s]+)`) + name := validID.FindString(*url) + *url = c.getUnqualifiedName(name) +} + +func (c *ComputeClient) getQualifiedList(list []string) []string { + for i, name := range list { + list[i] = c.getQualifiedName(name) + } + return list +} + +func (c *ComputeClient) getUnqualifiedList(list []string) []string { + for i, name := range list { + list[i] = c.getUnqualifiedName(name) + } + return list +} + +func (c *ComputeClient) getQualifiedListName(name string) string { + nameParts := strings.Split(name, ":") + listType := nameParts[0] + listName := nameParts[1] + return fmt.Sprintf("%s:%s", listType, c.getQualifiedName(listName)) +} + +func (c 
*ComputeClient) unqualifyListName(qualifiedName string) string { + nameParts := strings.Split(qualifiedName, ":") + listType := nameParts[0] + listName := nameParts[1] + return fmt.Sprintf("%s:%s", listType, c.getUnqualifiedName(listName)) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_resource_client.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_resource_client.go new file mode 100644 index 000000000..2c3e1dc9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/compute_resource_client.go @@ -0,0 +1,113 @@ +package compute + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +// ResourceClient is an AuthenticatedClient with some additional information about the resources to be addressed. +type ResourceClient struct { + *ComputeClient + ResourceDescription string + ContainerPath string + ResourceRootPath string +} + +func (c *ResourceClient) createResource(requestBody interface{}, responseBody interface{}) error { + resp, err := c.executeRequest("POST", c.ContainerPath, requestBody) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) updateResource(name string, requestBody interface{}, responseBody interface{}) error { + resp, err := c.executeRequest("PUT", c.getObjectPath(c.ResourceRootPath, name), requestBody) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) getResource(name string, responseBody interface{}) error { + var objectPath string + if name != "" { + objectPath = c.getObjectPath(c.ResourceRootPath, name) + } else { + objectPath = c.ResourceRootPath + } + resp, err := c.executeRequest("GET", objectPath, nil) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) deleteResource(name string) error { + var objectPath string 
+ if name != "" { + objectPath = c.getObjectPath(c.ResourceRootPath, name) + } else { + objectPath = c.ResourceRootPath + } + _, err := c.executeRequest("DELETE", objectPath, nil) + if err != nil { + return err + } + + // No errors and no response body to write + return nil +} + +func (c *ResourceClient) deleteOrchestration(name string) error { + var objectPath string + if name != "" { + objectPath = c.getObjectPath(c.ResourceRootPath, name) + } else { + objectPath = c.ResourceRootPath + } + // Set terminate to true as we always want to delete an orchestration + objectPath = fmt.Sprintf("%s?terminate=True", objectPath) + + _, err := c.executeRequest("DELETE", objectPath, nil) + if err != nil { + return err + } + + // No errors and no response body to write + return nil +} + +func (c *ResourceClient) unmarshalResponseBody(resp *http.Response, iface interface{}) error { + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + c.client.DebugLogString(fmt.Sprintf("HTTP Resp (%d): %s", resp.StatusCode, buf.String())) + // JSON decode response into interface + var tmp interface{} + dcd := json.NewDecoder(buf) + if err := dcd.Decode(&tmp); err != nil { + return err + } + + // Use mapstructure to weakly decode into the resulting interface + msdcd, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: iface, + TagName: "json", + }) + if err != nil { + return err + } + + if err := msdcd.Decode(tmp); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go new file mode 100644 index 000000000..0d4ca06ba --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go @@ -0,0 +1,154 @@ +package compute + +const ( + ImageListDescription = "Image List" + ImageListContainerPath = "/imagelist/" + ImageListResourcePath = "/imagelist" +) + +// ImageListClient is a client for 
the Image List functions of the Compute API. +type ImageListClient struct { + ResourceClient +} + +// ImageList obtains an ImageListClient which can be used to access to the +// Image List functions of the Compute API +func (c *ComputeClient) ImageList() *ImageListClient { + return &ImageListClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: ImageListDescription, + ContainerPath: ImageListContainerPath, + ResourceRootPath: ImageListResourcePath, + }} +} + +type ImageListEntry struct { + // User-defined parameters, in JSON format, that can be passed to an instance of this machine image when it is launched. + Attributes map[string]interface{} `json:"attributes"` + + // Name of the Image List. + ImageList string `json:"imagelist"` + + // A list of machine images. + MachineImages []string `json:"machineimages"` + + // Uniform Resource Identifier. + URI string `json:"uri"` + + // Version number of these Machine Images in the Image List. + Version int `json:"version"` +} + +// ImageList describes an existing Image List. +type ImageList struct { + // The image list entry to be used, by default, when launching instances using this image list + Default int `json:"default"` + + // A description of this image list. + Description string `json:"description"` + + // Each machine image in an image list is identified by an image list entry. + Entries []ImageListEntry `json:"entries"` + + // The name of the Image List + Name string `json:"name"` + + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateImageListInput defines an Image List to be created. +type CreateImageListInput struct { + // The image list entry to be used, by default, when launching instances using this image list. + // If you don't specify this value, it is set to 1. + // Optional + Default int `json:"default"` + + // A description of this image list. 
+ // Required + Description string `json:"description"` + + // The name of the Image List + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` +} + +// CreateImageList creates a new Image List with the given name, key and enabled flag. +func (c *ImageListClient) CreateImageList(createInput *CreateImageListInput) (*ImageList, error) { + var imageList ImageList + createInput.Name = c.getQualifiedName(createInput.Name) + if err := c.createResource(&createInput, &imageList); err != nil { + return nil, err + } + + return c.success(&imageList) +} + +// DeleteKeyInput describes the image list to delete +type DeleteImageListInput struct { + // The name of the Image List + Name string `json:name` +} + +// DeleteImageList deletes the Image List with the given name. +func (c *ImageListClient) DeleteImageList(deleteInput *DeleteImageListInput) error { + deleteInput.Name = c.getQualifiedName(deleteInput.Name) + return c.deleteResource(deleteInput.Name) +} + +// GetImageListInput describes the image list to get +type GetImageListInput struct { + // The name of the Image List + Name string `json:name` +} + +// GetImageList retrieves the Image List with the given name. +func (c *ImageListClient) GetImageList(getInput *GetImageListInput) (*ImageList, error) { + getInput.Name = c.getQualifiedName(getInput.Name) + + var imageList ImageList + if err := c.getResource(getInput.Name, &imageList); err != nil { + return nil, err + } + + return c.success(&imageList) +} + +// UpdateImageListInput defines an Image List to be updated +type UpdateImageListInput struct { + // The image list entry to be used, by default, when launching instances using this image list. + // If you don't specify this value, it is set to 1. + // Optional + Default int `json:"default"` + + // A description of this image list. 
+ // Required + Description string `json:"description"` + + // The name of the Image List + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` +} + +// UpdateImageList updates the key and enabled flag of the Image List with the given name. +func (c *ImageListClient) UpdateImageList(updateInput *UpdateImageListInput) (*ImageList, error) { + var imageList ImageList + updateInput.Name = c.getQualifiedName(updateInput.Name) + if err := c.updateResource(updateInput.Name, updateInput, &imageList); err != nil { + return nil, err + } + return c.success(&imageList) +} + +func (c *ImageListClient) success(imageList *ImageList) (*ImageList, error) { + c.unqualify(&imageList.Name) + + for _, v := range imageList.Entries { + v.MachineImages = c.getUnqualifiedList(v.MachineImages) + } + + return imageList, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go new file mode 100644 index 000000000..ac72ef45a --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go @@ -0,0 +1,122 @@ +package compute + +import "fmt" + +const ( + ImageListEntryDescription = "image list entry" + ImageListEntryContainerPath = "/imagelist" + ImageListEntryResourcePath = "/imagelist" +) + +type ImageListEntriesClient struct { + ResourceClient +} + +// ImageListEntries() returns an ImageListEntriesClient that can be used to access the +// necessary CRUD functions for Image List Entry's. 
+func (c *ComputeClient) ImageListEntries() *ImageListEntriesClient { + return &ImageListEntriesClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: ImageListEntryDescription, + ContainerPath: ImageListEntryContainerPath, + ResourceRootPath: ImageListEntryResourcePath, + }, + } +} + +// ImageListEntryInfo contains the exported fields necessary to hold all the information about an +// Image List Entry +type ImageListEntryInfo struct { + // User-defined parameters, in JSON format, that can be passed to an instance of this machine + // image when it is launched. This field can be used, for example, to specify the location of + // a database server and login details. Instance metadata, including user-defined data is available + // at http://192.0.0.192/ within an instance. See Retrieving User-Defined Instance Attributes in Using + // Oracle Compute Cloud Service (IaaS). + Attributes map[string]interface{} `json:"attributes"` + // Name of the imagelist. + Name string `json:"imagelist"` + // A list of machine images. + MachineImages []string `json:"machineimages"` + // Uniform Resource Identifier for the Image List Entry + Uri string `json:"uri"` + // Version number of these machineImages in the imagelist. + Version int `json:"version"` +} + +type CreateImageListEntryInput struct { + // The name of the Image List + Name string + // User-defined parameters, in JSON format, that can be passed to an instance of this machine + // image when it is launched. This field can be used, for example, to specify the location of + // a database server and login details. Instance metadata, including user-defined data is + //available at http://192.0.0.192/ within an instance. See Retrieving User-Defined Instance + //Attributes in Using Oracle Compute Cloud Service (IaaS). + // Optional + Attributes map[string]interface{} `json:"attributes"` + // A list of machine images. 
+ // Required + MachineImages []string `json:"machineimages"` + // The unique version of the entry in the image list. + // Required + Version int `json:"version"` +} + +// Create a new Image List Entry from an ImageListEntriesClient and an input struct. +// Returns a populated Info struct for the Image List Entry, and any errors +func (c *ImageListEntriesClient) CreateImageListEntry(input *CreateImageListEntryInput) (*ImageListEntryInfo, error) { + c.updateClientPaths(input.Name, -1) + var imageListEntryInfo ImageListEntryInfo + if err := c.createResource(&input, &imageListEntryInfo); err != nil { + return nil, err + } + return c.success(&imageListEntryInfo) +} + +type GetImageListEntryInput struct { + // The name of the Image List + Name string + // Version number of these machineImages in the imagelist. + Version int +} + +// Returns a populated ImageListEntryInfo struct from an input struct +func (c *ImageListEntriesClient) GetImageListEntry(input *GetImageListEntryInput) (*ImageListEntryInfo, error) { + c.updateClientPaths(input.Name, input.Version) + var imageListEntryInfo ImageListEntryInfo + if err := c.getResource("", &imageListEntryInfo); err != nil { + return nil, err + } + return c.success(&imageListEntryInfo) +} + +type DeleteImageListEntryInput struct { + // The name of the Image List + Name string + // Version number of these machineImages in the imagelist. 
+ Version int +} + +func (c *ImageListEntriesClient) DeleteImageListEntry(input *DeleteImageListEntryInput) error { + c.updateClientPaths(input.Name, input.Version) + return c.deleteResource("") +} + +func (c *ImageListEntriesClient) updateClientPaths(name string, version int) { + var containerPath, resourcePath string + name = c.getQualifiedName(name) + containerPath = ImageListEntryContainerPath + name + "/entry/" + resourcePath = ImageListEntryContainerPath + name + "/entry" + if version != -1 { + containerPath = fmt.Sprintf("%s%d", containerPath, version) + resourcePath = fmt.Sprintf("%s/%d", resourcePath, version) + } + c.ContainerPath = containerPath + c.ResourceRootPath = resourcePath +} + +// Unqualifies any qualified fields in the IPNetworkInfo struct +func (c *ImageListEntriesClient) success(info *ImageListEntryInfo) (*ImageListEntryInfo, error) { + c.unqualifyUrl(&info.Uri) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go new file mode 100644 index 000000000..ddfa8d6ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go @@ -0,0 +1,788 @@ +package compute + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-oracle-terraform/client" +) + +const WaitForInstanceReadyTimeout = time.Duration(3600 * time.Second) +const WaitForInstanceDeleteTimeout = time.Duration(3600 * time.Second) + +// InstancesClient is a client for the Instance functions of the Compute API. 
+type InstancesClient struct { + ResourceClient +} + +// Instances obtains an InstancesClient which can be used to access to the +// Instance functions of the Compute API +func (c *ComputeClient) Instances() *InstancesClient { + return &InstancesClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "instance", + ContainerPath: "/launchplan/", + ResourceRootPath: "/instance", + }} +} + +type InstanceState string + +const ( + InstanceRunning InstanceState = "running" + InstanceInitializing InstanceState = "initializing" + InstancePreparing InstanceState = "preparing" + InstanceStarting InstanceState = "starting" + InstanceStopping InstanceState = "stopping" + InstanceShutdown InstanceState = "shutdown" + InstanceQueued InstanceState = "queued" + InstanceError InstanceState = "error" +) + +type InstanceDesiredState string + +const ( + InstanceDesiredRunning InstanceDesiredState = "running" + InstanceDesiredShutdown InstanceDesiredState = "shutdown" +) + +// InstanceInfo represents the Compute API's view of the state of an instance. +type InstanceInfo struct { + // The ID for the instance. Set by the SDK based on the request - not the API. + ID string + + // A dictionary of attributes to be made available to the instance. + // A value with the key "userdata" will be made available in an EC2-compatible manner. + Attributes map[string]interface{} `json:"attributes"` + + // The availability domain for the instance + AvailabilityDomain string `json:"availability_domain"` + + // Boot order list. + BootOrder []int `json:"boot_order"` + + // The default domain to use for the hostname and DNS lookups + Domain string `json:"domain"` + + // The desired state of an instance + DesiredState InstanceDesiredState `json:"desired_state"` + + // Optional ImageListEntry number. Default will be used if not specified + Entry int `json:"entry"` + + // The reason for the instance going to error state, if available. 
+ ErrorReason string `json:"error_reason"` + + // SSH Server Fingerprint presented by the instance + Fingerprint string `json:"fingerprint"` + + // The hostname for the instance + Hostname string `json:"hostname"` + + // The format of the image + ImageFormat string `json:"image_format"` + + // Name of imagelist to be launched. + ImageList string `json:"imagelist"` + + // IP address of the instance. + IPAddress string `json:"ip"` + + // A label assigned by the user, specifically for defining inter-instance relationships. + Label string `json:"label"` + + // Name of this instance, generated by the server. + Name string `json:"name"` + + // Mapping of to network specifiers for virtual NICs to be attached to this instance. + Networking map[string]NetworkingInfo `json:"networking"` + + // A list of strings specifying arbitrary tags on nodes to be matched on placement. + PlacementRequirements []string `json:"placement_requirements"` + + // The OS platform for the instance. + Platform string `json:"platform"` + + // The priority at which this instance will be run + Priority string `json:"priority"` + + // Reference to the QuotaReservation, to be destroyed with the instance + QuotaReservation string `json:"quota_reservation"` + + // Array of relationship specifications to be satisfied on this instance's placement + Relationships []string `json:"relationships"` + + // Resolvers to use instead of the default resolvers + Resolvers []string `json:"resolvers"` + + // Add PTR records for the hostname + ReverseDNS bool `json:"reverse_dns"` + + // Type of instance, as defined on site configuration. + Shape string `json:"shape"` + + // Site to run on + Site string `json:"site"` + + // ID's of SSH keys that will be exposed to the instance. + SSHKeys []string `json:"sshkeys"` + + // The start time of the instance + StartTime string `json:"start_time"` + + // State of the instance. + State InstanceState `json:"state"` + + // The Storage Attachment information. 
+ Storage []StorageAttachment `json:"storage_attachments"` + + // Array of tags associated with the instance. + Tags []string `json:"tags"` + + // vCable for this instance. + VCableID string `json:"vcable_id"` + + // Specify if the devices created for the instance are virtio devices. If not specified, the default + // will come from the cluster configuration file + Virtio bool `json:"virtio,omitempty"` + + // IP Address and port of the VNC console for the instance + VNC string `json:"vnc"` +} + +type StorageAttachment struct { + // The index number for the volume. + Index int `json:"index"` + + // The three-part name (/Compute-identity_domain/user/object) of the storage attachment. + Name string `json:"name"` + + // The three-part name (/Compute-identity_domain/user/object) of the storage volume attached to the instance. + StorageVolumeName string `json:"storage_volume_name"` +} + +func (i *InstanceInfo) getInstanceName() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, i.Name, i.ID) +} + +type CreateInstanceInput struct { + // A dictionary of user-defined attributes to be made available to the instance. + // Optional + Attributes map[string]interface{} `json:"attributes"` + // Boot order list + // Optional + BootOrder []int `json:"boot_order,omitempty"` + // The desired state of the opc instance. Can only be `running` or `shutdown` + // Omits if empty. + // Optional + DesiredState InstanceDesiredState `json:"desired_state,omitempty"` + // The host name assigned to the instance. On an Oracle Linux instance, + // this host name is displayed in response to the hostname command. + // Only relative DNS is supported. The domain name is suffixed to the host name + // that you specify. The host name must not end with a period. If you don't specify a + // host name, then a name is generated automatically. + // Optional + Hostname string `json:"hostname"` + // Name of imagelist to be launched. 
+ // Optional + ImageList string `json:"imagelist"` + // A label assigned by the user, specifically for defining inter-instance relationships. + // Optional + Label string `json:"label"` + // Name of this instance, generated by the server. + // Optional + Name string `json:"name"` + // Networking information. + // Optional + Networking map[string]NetworkingInfo `json:"networking"` + // If set to true (default), then reverse DNS records are created. + // If set to false, no reverse DNS records are created. + // Optional + ReverseDNS bool `json:"reverse_dns,omitempty"` + // Type of instance, as defined on site configuration. + // Required + Shape string `json:"shape"` + // A list of the Storage Attachments you want to associate with the instance. + // Optional + Storage []StorageAttachmentInput `json:"storage_attachments,omitempty"` + // A list of the SSH public keys that you want to associate with the instance. + // Optional + SSHKeys []string `json:"sshkeys"` + // A list of tags to be supplied to the instance + // Optional + Tags []string `json:"tags"` + // Time to wait for an instance to be ready + Timeout time.Duration `json:"-"` +} + +type StorageAttachmentInput struct { + // The index number for the volume. The allowed range is 1 to 10. + // If you want to use a storage volume as the boot disk for an instance, you must specify the index number for that volume as 1. + // The index determines the device name by which the volume is exposed to the instance. + Index int `json:"index"` + // The three-part name (/Compute-identity_domain/user/object) of the storage volume that you want to attach to the instance. + // Note that volumes attached to an instance at launch time can't be detached. 
+ Volume string `json:"volume"` +} + +const ReservationPrefix = "ipreservation" +const ReservationIPPrefix = "network/v1/ipreservation" + +type NICModel string + +const ( + NICDefaultModel NICModel = "e1000" +) + +// Struct of Networking info from a populated instance, or to be used as input to create an instance +type NetworkingInfo struct { + // The DNS name for the Shared network (Required) + // DNS A Record for an IP Network (Optional) + DNS []string `json:"dns,omitempty"` + // IP Network only. + // If you want to associate a static private IP Address, + // specify that here within the range of the supplied IPNetwork attribute. + // Optional + IPAddress string `json:"ip,omitempty"` + // IP Network only. + // The name of the IP Network you want to add the instance to. + // Required + IPNetwork string `json:"ipnetwork,omitempty"` + // IP Network only. + // Set interface as default gateway for all traffic + // Optional + IsDefaultGateway bool `json:"is_default_gateway,omitempty"` + // IP Network only. + // The hexadecimal MAC Address of the interface + // Optional + MACAddress string `json:"address,omitempty"` + // Shared Network only. + // The type of NIC used. Must be set to 'e1000' + // Required + Model NICModel `json:"model,omitempty"` + // IP Network and Shared Network + // The name servers that are sent through DHCP as option 6. + // You can specify a maximum of eight name server IP addresses per interface. + // Optional + NameServers []string `json:"name_servers,omitempty"` + // The names of an IP Reservation to associate in an IP Network (Optional) + // Indicates whether a temporary or permanent public IP Address should be assigned + // in a Shared Network (Required) + Nat []string `json:"nat,omitempty"` + // IP Network and Shared Network + // The search domains that should be sent through DHCP as option 119. + // You can enter a maximum of eight search domain zones per interface. 
+ // Optional + SearchDomains []string `json:"search_domains,omitempty"` + // Shared Network only. + // The security lists that you want to add the instance to + // Required + SecLists []string `json:"seclists,omitempty"` + // IP Network Only + // The name of the vNIC + // Optional + Vnic string `json:"vnic,omitempty"` + // IP Network only. + // The names of the vNICSets you want to add the interface to. + // Optional + VnicSets []string `json:"vnicsets,omitempty"` +} + +// LaunchPlan defines a launch plan, used to launch instances with the supplied InstanceSpec(s) +type LaunchPlanInput struct { + // Describes an array of instances which should be launched + Instances []CreateInstanceInput `json:"instances"` + // Time to wait for instance boot + Timeout time.Duration `json:"-"` +} + +type LaunchPlanResponse struct { + // An array of instances which have been launched + Instances []InstanceInfo `json:"instances"` +} + +// LaunchInstance creates and submits a LaunchPlan to launch a new instance. 
+func (c *InstancesClient) CreateInstance(input *CreateInstanceInput) (*InstanceInfo, error) { + qualifiedSSHKeys := []string{} + for _, key := range input.SSHKeys { + qualifiedSSHKeys = append(qualifiedSSHKeys, c.getQualifiedName(key)) + } + + input.SSHKeys = qualifiedSSHKeys + + qualifiedStorageAttachments := []StorageAttachmentInput{} + for _, attachment := range input.Storage { + qualifiedStorageAttachments = append(qualifiedStorageAttachments, StorageAttachmentInput{ + Index: attachment.Index, + Volume: c.getQualifiedName(attachment.Volume), + }) + } + input.Storage = qualifiedStorageAttachments + + input.Networking = c.qualifyNetworking(input.Networking) + + input.Name = fmt.Sprintf(CMP_QUALIFIED_NAME, c.getUserName(), input.Name) + + plan := LaunchPlanInput{ + Instances: []CreateInstanceInput{*input}, + Timeout: input.Timeout, + } + + var ( + instanceInfo *InstanceInfo + instanceError error + ) + for i := 0; i < *c.ComputeClient.client.MaxRetries; i++ { + c.client.DebugLogString(fmt.Sprintf("(Iteration: %d of %d) Creating instance with name %s\n Plan: %+v", i, *c.ComputeClient.client.MaxRetries, input.Name, plan)) + + instanceInfo, instanceError = c.startInstance(input.Name, plan) + if instanceError == nil { + c.client.DebugLogString(fmt.Sprintf("(Iteration: %d of %d) Finished creating instance with name %s\n Info: %+v", i, *c.ComputeClient.client.MaxRetries, input.Name, instanceInfo)) + return instanceInfo, nil + } + } + return nil, instanceError +} + +func (c *InstancesClient) startInstance(name string, plan LaunchPlanInput) (*InstanceInfo, error) { + var responseBody LaunchPlanResponse + + if err := c.createResource(&plan, &responseBody); err != nil { + return nil, err + } + + if len(responseBody.Instances) == 0 { + return nil, fmt.Errorf("No instance information returned: %#v", responseBody) + } + + // Call wait for instance ready now, as creating the instance is an eventually consistent operation + getInput := &GetInstanceInput{ + Name: name, + ID: 
responseBody.Instances[0].ID, + } + + //timeout := WaitForInstanceReadyTimeout + if plan.Timeout == 0 { + plan.Timeout = WaitForInstanceReadyTimeout + } + + // Wait for instance to be ready and return the result + // Don't have to unqualify any objects, as the GetInstance method will handle that + instanceInfo, instanceError := c.WaitForInstanceRunning(getInput, plan.Timeout) + // If the instance enters an error state we need to delete the instance and retry + if instanceError != nil { + deleteInput := &DeleteInstanceInput{ + Name: name, + ID: responseBody.Instances[0].ID, + } + err := c.DeleteInstance(deleteInput) + if err != nil { + return nil, fmt.Errorf("Error deleting instance %s: %s", name, err) + } + return nil, instanceError + } + return instanceInfo, nil +} + +// Both of these fields are required. If they're not provided, things go wrong in +// incredibly amazing ways. +type GetInstanceInput struct { + // The Unqualified Name of this Instance + Name string + // The Unqualified ID of this Instance + ID string +} + +func (g *GetInstanceInput) String() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, g.Name, g.ID) +} + +// GetInstance retrieves information about an instance. 
+func (c *InstancesClient) GetInstance(input *GetInstanceInput) (*InstanceInfo, error) { + if input.ID == "" || input.Name == "" { + return nil, errors.New("Both instance name and ID need to be specified") + } + + var responseBody InstanceInfo + if err := c.getResource(input.String(), &responseBody); err != nil { + return nil, err + } + + if responseBody.Name == "" { + return nil, fmt.Errorf("Empty response body when requesting instance %s", input.Name) + } + + // The returned 'Name' attribute is the fully qualified instance name + "/" + ID + // Split these out to accurately populate the fields + nID := strings.Split(c.getUnqualifiedName(responseBody.Name), "/") + responseBody.Name = nID[0] + responseBody.ID = nID[1] + + c.unqualify(&responseBody.VCableID) + + // Unqualify SSH Key names + sshKeyNames := []string{} + for _, sshKeyRef := range responseBody.SSHKeys { + sshKeyNames = append(sshKeyNames, c.getUnqualifiedName(sshKeyRef)) + } + responseBody.SSHKeys = sshKeyNames + + var networkingErr error + responseBody.Networking, networkingErr = c.unqualifyNetworking(responseBody.Networking) + if networkingErr != nil { + return nil, networkingErr + } + responseBody.Storage = c.unqualifyStorage(responseBody.Storage) + + return &responseBody, nil +} + +type InstancesInfo struct { + Instances []InstanceInfo `json:"result"` +} + +type GetInstanceIdInput struct { + // Name of the instance you want to get + Name string +} + +// GetInstanceFromName loops through all the instances and finds the instance for the given name +// This is needed for orchestration since it doesn't return the id for the instance it creates. 
+func (c *InstancesClient) GetInstanceFromName(input *GetInstanceIdInput) (*InstanceInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var instancesInfo InstancesInfo + if err := c.getResource(fmt.Sprintf("%s/", c.getUserName()), &instancesInfo); err != nil { + return nil, err + } + + for _, i := range instancesInfo.Instances { + if strings.Contains(i.Name, input.Name) { + if i.Name == "" { + return nil, fmt.Errorf("Empty response body when requesting instance %s", input.Name) + } + + // The returned 'Name' attribute is the fully qualified instance name + "/" + ID + // Split these out to accurately populate the fields + nID := strings.Split(c.getUnqualifiedName(i.Name), "/") + i.Name = nID[0] + i.ID = nID[1] + + c.unqualify(&i.VCableID) + + // Unqualify SSH Key names + sshKeyNames := []string{} + for _, sshKeyRef := range i.SSHKeys { + sshKeyNames = append(sshKeyNames, c.getUnqualifiedName(sshKeyRef)) + } + i.SSHKeys = sshKeyNames + + var networkingErr error + i.Networking, networkingErr = c.unqualifyNetworking(i.Networking) + if networkingErr != nil { + return nil, networkingErr + } + i.Storage = c.unqualifyStorage(i.Storage) + + return &i, nil + } + } + + return nil, fmt.Errorf("Unable to find instance: %q", input.Name) +} + +type UpdateInstanceInput struct { + // Name of this instance, generated by the server. + // Required + Name string `json:"name"` + // The desired state of the opc instance. Can only be `running` or `shutdown` + // Omits if empty. 
+ // Optional + DesiredState InstanceDesiredState `json:"desired_state,omitempty"` + // The ID of the instance + // Required + ID string `json:"-"` + // A list of tags to be supplied to the instance + // Optional + Tags []string `json:"tags,omitempty"` + // Time to wait for instance to be ready, or shutdown depending on desired state + Timeout time.Duration `json:"-"` +} + +func (g *UpdateInstanceInput) String() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, g.Name, g.ID) +} + +func (c *InstancesClient) UpdateInstance(input *UpdateInstanceInput) (*InstanceInfo, error) { + if input.Name == "" || input.ID == "" { + return nil, errors.New("Both instance name and ID need to be specified") + } + + input.Name = fmt.Sprintf(CMP_QUALIFIED_NAME, c.getUserName(), input.Name) + + var responseBody InstanceInfo + if err := c.updateResource(input.String(), input, &responseBody); err != nil { + return nil, err + } + + getInput := &GetInstanceInput{ + Name: input.Name, + ID: input.ID, + } + + if input.Timeout == 0 { + input.Timeout = WaitForInstanceReadyTimeout + } + + // Wait for the correct instance action depending on the current desired state. + // If the instance is already running, and the desired state is to be "running", the + // wait loop will only execute a single time to verify the instance state. Otherwise + // we wait until the correct action has finalized, either a shutdown or restart, catching + // any intermittent errors during the process. 
+ if responseBody.DesiredState == InstanceDesiredRunning { + return c.WaitForInstanceRunning(getInput, input.Timeout) + } else { + return c.WaitForInstanceShutdown(getInput, input.Timeout) + } +} + +type DeleteInstanceInput struct { + // The Unqualified Name of this Instance + Name string + // The Unqualified ID of this Instance + ID string + // Time to wait for instance to be deleted + Timeout time.Duration +} + +func (d *DeleteInstanceInput) String() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, d.Name, d.ID) +} + +// DeleteInstance deletes an instance. +func (c *InstancesClient) DeleteInstance(input *DeleteInstanceInput) error { + // Call to delete the instance + if err := c.deleteResource(input.String()); err != nil { + return err + } + + if input.Timeout == 0 { + input.Timeout = WaitForInstanceDeleteTimeout + } + + // Wait for instance to be deleted + return c.WaitForInstanceDeleted(input, input.Timeout) +} + +// WaitForInstanceRunning waits for an instance to be completely initialized and available. 
+func (c *InstancesClient) WaitForInstanceRunning(input *GetInstanceInput, timeout time.Duration) (*InstanceInfo, error) { + var info *InstanceInfo + var getErr error + err := c.client.WaitFor("instance to be ready", timeout, func() (bool, error) { + info, getErr = c.GetInstance(input) + if getErr != nil { + return false, getErr + } + c.client.DebugLogString(fmt.Sprintf("Instance name is %v, Instance info is %+v", info.Name, info)) + switch s := info.State; s { + case InstanceError: + return false, fmt.Errorf("Error initializing instance: %s", info.ErrorReason) + case InstanceRunning: // Target State + c.client.DebugLogString("Instance Running") + return true, nil + case InstanceQueued: + c.client.DebugLogString("Instance Queuing") + return false, nil + case InstanceInitializing: + c.client.DebugLogString("Instance Initializing") + return false, nil + case InstancePreparing: + c.client.DebugLogString("Instance Preparing") + return false, nil + case InstanceStarting: + c.client.DebugLogString("Instance Starting") + return false, nil + default: + c.client.DebugLogString(fmt.Sprintf("Unknown instance state: %s, waiting", s)) + return false, nil + } + }) + return info, err +} + +// WaitForInstanceShutdown waits for an instance to be shutdown +func (c *InstancesClient) WaitForInstanceShutdown(input *GetInstanceInput, timeout time.Duration) (*InstanceInfo, error) { + var info *InstanceInfo + var getErr error + err := c.client.WaitFor("instance to be shutdown", timeout, func() (bool, error) { + info, getErr = c.GetInstance(input) + if getErr != nil { + return false, getErr + } + switch s := info.State; s { + case InstanceError: + return false, fmt.Errorf("Error initializing instance: %s", info.ErrorReason) + case InstanceRunning: + c.client.DebugLogString("Instance Running") + return false, nil + case InstanceQueued: + c.client.DebugLogString("Instance Queuing") + return false, nil + case InstanceInitializing: + c.client.DebugLogString("Instance Initializing") + return 
false, nil + case InstancePreparing: + c.client.DebugLogString("Instance Preparing") + return false, nil + case InstanceStarting: + c.client.DebugLogString("Instance Starting") + return false, nil + case InstanceShutdown: // Target State + c.client.DebugLogString("Instance Shutdown") + return true, nil + default: + c.client.DebugLogString(fmt.Sprintf("Unknown instance state: %s, waiting", s)) + return false, nil + } + }) + return info, err +} + +// WaitForInstanceDeleted waits for an instance to be fully deleted. +func (c *InstancesClient) WaitForInstanceDeleted(input *DeleteInstanceInput, timeout time.Duration) error { + return c.client.WaitFor("instance to be deleted", timeout, func() (bool, error) { + var info InstanceInfo + if err := c.getResource(input.String(), &info); err != nil { + if client.WasNotFoundError(err) { + // Instance could not be found, thus deleted + return true, nil + } + // Some other error occurred trying to get instance, exit + return false, err + } + switch s := info.State; s { + case InstanceError: + return false, fmt.Errorf("Error stopping instance: %s", info.ErrorReason) + case InstanceStopping: + c.client.DebugLogString("Instance stopping") + return false, nil + default: + c.client.DebugLogString(fmt.Sprintf("Unknown instance state: %s, waiting", s)) + return false, nil + } + }) +} + +func (c *InstancesClient) qualifyNetworking(info map[string]NetworkingInfo) map[string]NetworkingInfo { + qualifiedNetworks := map[string]NetworkingInfo{} + for k, v := range info { + qfd := v + sharedNetwork := false + if v.IPNetwork != "" { + // Network interface is for an IP Network + qfd.IPNetwork = c.getQualifiedName(v.IPNetwork) + sharedNetwork = true + } + if v.Vnic != "" { + qfd.Vnic = c.getQualifiedName(v.Vnic) + } + if v.Nat != nil { + qfd.Nat = c.qualifyNat(v.Nat, sharedNetwork) + } + if v.VnicSets != nil { + qfd.VnicSets = c.getQualifiedList(v.VnicSets) + } + if v.SecLists != nil { + // Network interface is for the shared network + secLists := 
[]string{} + for _, v := range v.SecLists { + secLists = append(secLists, c.getQualifiedName(v)) + } + qfd.SecLists = secLists + } + + qualifiedNetworks[k] = qfd + } + return qualifiedNetworks +} + +func (c *InstancesClient) unqualifyNetworking(info map[string]NetworkingInfo) (map[string]NetworkingInfo, error) { + // Unqualify ip network + var err error + unqualifiedNetworks := map[string]NetworkingInfo{} + for k, v := range info { + unq := v + if v.IPNetwork != "" { + unq.IPNetwork = c.getUnqualifiedName(v.IPNetwork) + } + if v.Vnic != "" { + unq.Vnic = c.getUnqualifiedName(v.Vnic) + } + if v.Nat != nil { + unq.Nat, err = c.unqualifyNat(v.Nat) + if err != nil { + return nil, err + } + } + if v.VnicSets != nil { + unq.VnicSets = c.getUnqualifiedList(v.VnicSets) + } + if v.SecLists != nil { + secLists := []string{} + for _, v := range v.SecLists { + secLists = append(secLists, c.getUnqualifiedName(v)) + } + v.SecLists = secLists + } + unqualifiedNetworks[k] = unq + } + return unqualifiedNetworks, nil +} + +func (c *InstancesClient) qualifyNat(nat []string, shared bool) []string { + qualifiedNats := []string{} + for _, v := range nat { + if strings.HasPrefix(v, "ippool:/oracle") { + qualifiedNats = append(qualifiedNats, v) + continue + } + prefix := ReservationPrefix + if shared { + prefix = ReservationIPPrefix + } + qualifiedNats = append(qualifiedNats, fmt.Sprintf("%s:%s", prefix, c.getQualifiedName(v))) + } + return qualifiedNats +} + +func (c *InstancesClient) unqualifyNat(nat []string) ([]string, error) { + unQualifiedNats := []string{} + for _, v := range nat { + if strings.HasPrefix(v, "ippool:/oracle") { + unQualifiedNats = append(unQualifiedNats, v) + continue + } + n := strings.Split(v, ":") + if len(n) < 1 { + return nil, fmt.Errorf("Error unqualifying NAT: %s", v) + } + u := n[1] + unQualifiedNats = append(unQualifiedNats, c.getUnqualifiedName(u)) + } + return unQualifiedNats, nil +} + +func (c *InstancesClient) unqualifyStorage(attachments 
[]StorageAttachment) []StorageAttachment { + unqAttachments := []StorageAttachment{} + for _, v := range attachments { + if v.StorageVolumeName != "" { + v.StorageVolumeName = c.getUnqualifiedName(v.StorageVolumeName) + } + unqAttachments = append(unqAttachments, v) + } + + return unqAttachments +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go new file mode 100644 index 000000000..6c1167f12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go @@ -0,0 +1,152 @@ +package compute + +const ( + IPAddressAssociationDescription = "ip address association" + IPAddressAssociationContainerPath = "/network/v1/ipassociation/" + IPAddressAssociationResourcePath = "/network/v1/ipassociation" +) + +type IPAddressAssociationsClient struct { + ResourceClient +} + +// IPAddressAssociations() returns an IPAddressAssociationsClient that can be used to access the +// necessary CRUD functions for IP Address Associations. +func (c *ComputeClient) IPAddressAssociations() *IPAddressAssociationsClient { + return &IPAddressAssociationsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: IPAddressAssociationDescription, + ContainerPath: IPAddressAssociationContainerPath, + ResourceRootPath: IPAddressAssociationResourcePath, + }, + } +} + +// IPAddressAssociationInfo contains the exported fields necessary to hold all the information about an +// IP Address Association +type IPAddressAssociationInfo struct { + // The name of the NAT IP address reservation. + IPAddressReservation string `json:"ipAddressReservation"` + // Name of the virtual NIC associated with this NAT IP reservation. 
+ Vnic string `json:"vnic"` + // The name of the IP Address Association + Name string `json:"name"` + // Description of the IP Address Association + Description string `json:"description"` + // Slice of tags associated with the IP Address Association + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Address Association + Uri string `json:"uri"` +} + +type CreateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. + // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. + // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Address Association from an IPAddressAssociationsClient and an input struct. +// Returns a populated Info struct for the IP Address Association, and any errors +func (c *IPAddressAssociationsClient) CreateIPAddressAssociation(input *CreateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPAddressReservation = c.getQualifiedName(input.IPAddressReservation) + input.Vnic = c.getQualifiedName(input.Vnic) + + var ipInfo IPAddressAssociationInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPAddressAssociationInfo struct from an input struct +func (c *IPAddressAssociationsClient) GetIPAddressAssociation(input *GetIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPAddressAssociationInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateIPAddressAssociationInput defines what to update in a ip address association +type UpdateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. + // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. 
+ // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPAddressAssociation update the ip address association +func (c *IPAddressAssociationsClient) UpdateIPAddressAssociation(updateInput *UpdateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.IPAddressReservation = c.getQualifiedName(updateInput.IPAddressReservation) + updateInput.Vnic = c.getQualifiedName(updateInput.Vnic) + var ipInfo IPAddressAssociationInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPAddressAssociationsClient) DeleteIPAddressAssociation(input *DeleteIPAddressAssociationInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPAddressAssociationInfo struct +func (c *IPAddressAssociationsClient) success(info *IPAddressAssociationInfo) (*IPAddressAssociationInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.Vnic) + c.unqualify(&info.IPAddressReservation) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go new file mode 100644 index 000000000..3f1503c08 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go @@ -0,0 +1,135 @@ +package compute + +const ( + IPAddressPrefixSetDescription = "ip address prefix set" + IPAddressPrefixSetContainerPath = 
"/network/v1/ipaddressprefixset/" + IPAddressPrefixSetResourcePath = "/network/v1/ipaddressprefixset" +) + +type IPAddressPrefixSetsClient struct { + ResourceClient +} + +// IPAddressPrefixSets() returns an IPAddressPrefixSetsClient that can be used to access the +// necessary CRUD functions for IP Address Prefix Sets. +func (c *ComputeClient) IPAddressPrefixSets() *IPAddressPrefixSetsClient { + return &IPAddressPrefixSetsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: IPAddressPrefixSetDescription, + ContainerPath: IPAddressPrefixSetContainerPath, + ResourceRootPath: IPAddressPrefixSetResourcePath, + }, + } +} + +// IPAddressPrefixSetInfo contains the exported fields necessary to hold all the information about an +// IP Address Prefix Set +type IPAddressPrefixSetInfo struct { + // The name of the IP Address Prefix Set + Name string `json:"name"` + // Description of the IP Address Prefix Set + Description string `json:"description"` + // List of CIDR IPv4 prefixes assigned in the virtual network. + IPAddressPrefixes []string `json:"ipAddressPrefixes"` + // Slice of tags associated with the IP Address Prefix Set + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Address Prefix Set + Uri string `json:"uri"` +} + +type CreateIPAddressPrefixSetInput struct { + // The name of the IP Address Prefix Set to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the IPAddressPrefixSet + // Optional + Description string `json:"description"` + + // List of CIDR IPv4 prefixes assigned in the virtual network. 
+ // Optional + IPAddressPrefixes []string `json:"ipAddressPrefixes"` + + // String slice of tags to apply to the IP Address Prefix Set object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Address Prefix Set from an IPAddressPrefixSetsClient and an input struct. +// Returns a populated Info struct for the IP Address Prefix Set, and any errors +func (c *IPAddressPrefixSetsClient) CreateIPAddressPrefixSet(input *CreateIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPAddressPrefixSetInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPAddressPrefixSetInput struct { + // The name of the IP Address Prefix Set to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPAddressPrefixSetInfo struct from an input struct +func (c *IPAddressPrefixSetsClient) GetIPAddressPrefixSet(input *GetIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPAddressPrefixSetInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateIPAddressPrefixSetInput defines what to update in a ip address prefix set +type UpdateIPAddressPrefixSetInput struct { + // The name of the IP Address Prefix Set to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the IPAddressPrefixSet + // Optional + Description string `json:"description"` + + // List of CIDR IPv4 prefixes assigned in the virtual network. 
+ IPAddressPrefixes []string `json:"ipAddressPrefixes"` + + // String slice of tags to apply to the IP Address Prefix Set object + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPAddressPrefixSet update the ip address prefix set +func (c *IPAddressPrefixSetsClient) UpdateIPAddressPrefixSet(updateInput *UpdateIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + var ipInfo IPAddressPrefixSetInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPAddressPrefixSetInput struct { + // The name of the IP Address Prefix Set to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPAddressPrefixSetsClient) DeleteIPAddressPrefixSet(input *DeleteIPAddressPrefixSetInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPAddressPrefixSetInfo struct +func (c *IPAddressPrefixSetsClient) success(info *IPAddressPrefixSetInfo) (*IPAddressPrefixSetInfo, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go new file mode 100644 index 000000000..a1175711e --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go @@ -0,0 +1,190 @@ +package compute + +import ( + "fmt" + "path/filepath" +) + +// IPAddressReservationsClient is a client to manage ip address reservation resources +type IPAddressReservationsClient struct { + *ResourceClient +} + +const ( + IPAddressReservationDescription = "IP Address Reservation" + IPAddressReservationContainerPath = "/network/v1/ipreservation/" + IPAddressReservationResourcePath = "/network/v1/ipreservation" + IPAddressReservationQualifier = "/oracle/public" +) + +// 
IPAddressReservations returns an IPAddressReservationsClient to manage IP address reservation +// resources +func (c *ComputeClient) IPAddressReservations() *IPAddressReservationsClient { + return &IPAddressReservationsClient{ + ResourceClient: &ResourceClient{ + ComputeClient: c, + ResourceDescription: IPAddressReservationDescription, + ContainerPath: IPAddressReservationContainerPath, + ResourceRootPath: IPAddressReservationResourcePath, + }, + } +} + +// IPAddressReservation describes an IP Address reservation +type IPAddressReservation struct { + // Description of the IP Address Reservation + Description string `json:"description"` + + // Reserved NAT IPv4 address from the IP Address Pool + IPAddress string `json:"ipAddress"` + + // Name of the IP Address pool to reserve the NAT IP from + IPAddressPool string `json:"ipAddressPool"` + + // Name of the reservation + Name string `json:"name"` + + // Tags associated with the object + Tags []string `json:"tags"` + + // Uniform Resource Identified for the reservation + Uri string `json:"uri"` +} + +const ( + PublicIPAddressPool = "public-ippool" + PrivateIPAddressPool = "cloud-ippool" +) + +// CreateIPAddressReservationInput defines input parameters to create an ip address reservation +type CreateIPAddressReservationInput struct { + // Description of the IP Address Reservation + // Optional + Description string `json:"description"` + + // IP Address pool from which to reserve an IP Address. 
+ // Can be one of the following: + // + // 'public-ippool' - When you attach an IP Address from this pool to an instance, you enable + // access between the public Internet and the instance + // 'cloud-ippool' - When you attach an IP Address from this pool to an instance, the instance + // can communicate privately with other Oracle Cloud Services + // Optional + IPAddressPool string `json:"ipAddressPool"` + + // The name of the reservation to create + // Required + Name string `json:"name"` + + // Tags to associate with the IP Reservation + // Optional + Tags []string `json:"tags"` +} + +// Takes an input struct, creates an IP Address reservation, and returns the info struct and any errors +func (c *IPAddressReservationsClient) CreateIPAddressReservation(input *CreateIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + // Qualify supplied name + input.Name = c.getQualifiedName(input.Name) + // Qualify supplied address pool if not nil + if input.IPAddressPool != "" { + input.IPAddressPool = c.qualifyIPAddressPool(input.IPAddressPool) + } + + if err := c.createResource(input, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to retrieve information on an ip address reservation +type GetIPAddressReservationInput struct { + // Name of the IP Reservation + // Required + Name string `json:"name"` +} + +// Returns an IP Address Reservation and any errors +func (c *IPAddressReservationsClient) GetIPAddressReservation(input *GetIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to update an IP Address reservation +type UpdateIPAddressReservationInput struct { + // Description of the IP Address Reservation + // Optional + Description string 
`json:"description"` + + // IP Address pool from which to reserve an IP Address. + // Can be one of the following: + // + // 'public-ippool' - When you attach an IP Address from this pool to an instance, you enable + // access between the public Internet and the instance + // 'cloud-ippool' - When you attach an IP Address from this pool to an instance, the instance + // can communicate privately with other Oracle Cloud Services + // Optional + IPAddressPool string `json:"ipAddressPool"` + + // The name of the reservation to create + // Required + Name string `json:"name"` + + // Tags to associate with the IP Reservation + // Optional + Tags []string `json:"tags"` +} + +func (c *IPAddressReservationsClient) UpdateIPAddressReservation(input *UpdateIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + + // Qualify supplied name + input.Name = c.getQualifiedName(input.Name) + // Qualify supplied address pool if not nil + if input.IPAddressPool != "" { + input.IPAddressPool = c.qualifyIPAddressPool(input.IPAddressPool) + } + + if err := c.updateResource(input.Name, input, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to delete an IP Address Reservation +type DeleteIPAddressReservationInput struct { + // The name of the reservation to delete + Name string `json:"name"` +} + +func (c *IPAddressReservationsClient) DeleteIPAddressReservation(input *DeleteIPAddressReservationInput) error { + input.Name = c.getQualifiedName(input.Name) + return c.deleteResource(input.Name) +} + +func (c *IPAddressReservationsClient) success(result *IPAddressReservation) (*IPAddressReservation, error) { + c.unqualify(&result.Name) + if result.IPAddressPool != "" { + result.IPAddressPool = c.unqualifyIPAddressPool(result.IPAddressPool) + } + + return result, nil +} + +func (c *IPAddressReservationsClient) qualifyIPAddressPool(input string) string { + // Add '/oracle/public/' + return 
fmt.Sprintf("%s/%s", IPAddressReservationQualifier, input) +} + +func (c *IPAddressReservationsClient) unqualifyIPAddressPool(input string) string { + // Remove '/oracle/public/' + return filepath.Base(input) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go new file mode 100644 index 000000000..09e194985 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go @@ -0,0 +1,118 @@ +package compute + +import ( + "fmt" + "strings" +) + +// IPAssociationsClient is a client for the IP Association functions of the Compute API. +type IPAssociationsClient struct { + *ResourceClient +} + +// IPAssociations obtains a IPAssociationsClient which can be used to access to the +// IP Association functions of the Compute API +func (c *ComputeClient) IPAssociations() *IPAssociationsClient { + return &IPAssociationsClient{ + ResourceClient: &ResourceClient{ + ComputeClient: c, + ResourceDescription: "ip association", + ContainerPath: "/ip/association/", + ResourceRootPath: "/ip/association", + }} +} + +// IPAssociationInfo describes an existing IP association. +type IPAssociationInfo struct { + // TODO: it'd probably make sense to expose the `ip` field here too? + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The three-part name of the IP reservation object in the format (/Compute-identity_domain/user/object). + // An IP reservation is a public IP address which is attached to an Oracle Compute Cloud Service instance that requires access to or from the Internet. + Reservation string `json:"reservation"` + + // The type of IP Address to associate with this instance + // for a Dynamic IP address specify `ippool:/oracle/public/ippool`. 
+ // for a Static IP address specify the three part name of the existing IP reservation + ParentPool string `json:"parentpool"` + + // Uniform Resource Identifier for the IP Association + URI string `json:"uri"` + + // The three-part name of a vcable ID of an instance that is associated with the IP reservation. + VCable string `json:"vcable"` +} + +type CreateIPAssociationInput struct { + // The type of IP Address to associate with this instance + // for a Dynamic IP address specify `ippool:/oracle/public/ippool`. + // for a Static IP address specify the three part name of the existing IP reservation + // Required + ParentPool string `json:"parentpool"` + + // The three-part name of the vcable ID of the instance that you want to associate with an IP address. The three-part name is in the format: /Compute-identity_domain/user/object. + // Required + VCable string `json:"vcable"` +} + +// CreateIPAssociation creates a new IP association with the supplied vcable and parentpool. +func (c *IPAssociationsClient) CreateIPAssociation(input *CreateIPAssociationInput) (*IPAssociationInfo, error) { + input.VCable = c.getQualifiedName(input.VCable) + input.ParentPool = c.getQualifiedParentPoolName(input.ParentPool) + var assocInfo IPAssociationInfo + if err := c.createResource(input, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +type GetIPAssociationInput struct { + // The three-part name of the IP Association + // Required. + Name string `json:"name"` +} + +// GetIPAssociation retrieves the IP association with the given name. +func (c *IPAssociationsClient) GetIPAssociation(input *GetIPAssociationInput) (*IPAssociationInfo, error) { + var assocInfo IPAssociationInfo + if err := c.getResource(input.Name, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +type DeleteIPAssociationInput struct { + // The three-part name of the IP Association + // Required. 
+ Name string `json:"name"` +} + +// DeleteIPAssociation deletes the IP association with the given name. +func (c *IPAssociationsClient) DeleteIPAssociation(input *DeleteIPAssociationInput) error { + return c.deleteResource(input.Name) +} + +func (c *IPAssociationsClient) getQualifiedParentPoolName(parentpool string) string { + parts := strings.Split(parentpool, ":") + pooltype := parts[0] + name := parts[1] + return fmt.Sprintf("%s:%s", pooltype, c.getQualifiedName(name)) +} + +func (c *IPAssociationsClient) unqualifyParentPoolName(parentpool *string) { + parts := strings.Split(*parentpool, ":") + pooltype := parts[0] + name := parts[1] + *parentpool = fmt.Sprintf("%s:%s", pooltype, c.getUnqualifiedName(name)) +} + +// Unqualifies identifiers +func (c *IPAssociationsClient) success(assocInfo *IPAssociationInfo) (*IPAssociationInfo, error) { + c.unqualify(&assocInfo.Name, &assocInfo.VCable) + c.unqualifyParentPoolName(&assocInfo.ParentPool) + return assocInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go new file mode 100644 index 000000000..1df33f296 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go @@ -0,0 +1,99 @@ +package compute + +const ( + IPNetworkExchangeDescription = "ip network exchange" + IPNetworkExchangeContainerPath = "/network/v1/ipnetworkexchange/" + IPNetworkExchangeResourcePath = "/network/v1/ipnetworkexchange" +) + +type IPNetworkExchangesClient struct { + ResourceClient +} + +// IPNetworkExchanges() returns an IPNetworkExchangesClient that can be used to access the +// necessary CRUD functions for IP Network Exchanges. 
+func (c *ComputeClient) IPNetworkExchanges() *IPNetworkExchangesClient { + return &IPNetworkExchangesClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: IPNetworkExchangeDescription, + ContainerPath: IPNetworkExchangeContainerPath, + ResourceRootPath: IPNetworkExchangeResourcePath, + }, + } +} + +// IPNetworkExchangeInfo contains the exported fields necessary to hold all the information about an +// IP Network Exchange +type IPNetworkExchangeInfo struct { + // The name of the IP Network Exchange + Name string `json:"name"` + // Description of the IP Network Exchange + Description string `json:"description"` + // Slice of tags associated with the IP Network Exchange + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Network Exchange + Uri string `json:"uri"` +} + +type CreateIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the IPNetworkExchange + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Network Exchange object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Network Exchange from an IPNetworkExchangesClient and an input struct. +// Returns a populated Info struct for the IP Network Exchange, and any errors +func (c *IPNetworkExchangesClient) CreateIPNetworkExchange(input *CreateIPNetworkExchangeInput) (*IPNetworkExchangeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkExchangeInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPNetworkExchangeInfo struct from an input struct +func (c *IPNetworkExchangesClient) GetIPNetworkExchange(input *GetIPNetworkExchangeInput) (*IPNetworkExchangeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkExchangeInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPNetworkExchangesClient) DeleteIPNetworkExchange(input *DeleteIPNetworkExchangeInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPNetworkExchangeInfo struct +func (c *IPNetworkExchangesClient) success(info *IPNetworkExchangeInfo) (*IPNetworkExchangeInfo, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go new file mode 100644 index 000000000..bfc324845 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go @@ -0,0 +1,186 @@ +package compute + +const ( + IPNetworkDescription = "ip network" + IPNetworkContainerPath = "/network/v1/ipnetwork/" + IPNetworkResourcePath = "/network/v1/ipnetwork" +) + +type IPNetworksClient struct { + ResourceClient +} + +// IPNetworks() returns an IPNetworksClient that can be used to access the +// necessary CRUD functions for IP Networks. 
+func (c *ComputeClient) IPNetworks() *IPNetworksClient { + return &IPNetworksClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: IPNetworkDescription, + ContainerPath: IPNetworkContainerPath, + ResourceRootPath: IPNetworkResourcePath, + }, + } +} + +// IPNetworkInfo contains the exported fields necessary to hold all the information about an +// IP Network +type IPNetworkInfo struct { + // The name of the IP Network + Name string `json:"name"` + // The CIDR IPv4 prefix associated with the IP Network + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the IP Network Exchange associated with the IP Network + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + // Description of the IP Network + Description string `json:"description"` + // Whether public internet access was enabled using NAPT for VNICs without any public IP reservation + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + // Slice of tags associated with the IP Network + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Network + Uri string `json:"uri"` +} + +type CreateIPNetworkInput struct { + // The name of the IP Network to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Specify the size of the IP Subnet. It is a range of IPv4 addresses assigned in the virtual + // network, in CIDR address prefix format. + // While specifying the IP address prefix take care of the following points: + // + //* These IP addresses aren't part of the common pool of Oracle-provided IP addresses used by the shared network. + // + //* There's no conflict with the range of IP addresses used in another IP network, the IP addresses used your on-premises network, or with the range of private IP addresses used in the shared network. 
If IP networks with overlapping IP subnets are linked to an IP exchange, packets going to and from those IP networks are dropped. + // + //* The upper limit of the CIDR block size for an IP network is /16. + // + //Note: The first IP address of any IP network is reserved for the default gateway, the DHCP server, and the DNS server of that IP network. + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + + //Specify the IP network exchange to which the IP network belongs. + //You can add an IP network to only one IP network exchange, but an IP network exchange + //can include multiple IP networks. An IP network exchange enables access between IP networks + //that have non-overlapping addresses, so that instances on these networks can exchange packets + //with each other without NAT. + // Optional + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + + // Description of the IPNetwork + // Optional + Description string `json:"description"` + + // Enable public internet access using NAPT for VNICs without any public IP reservation + // Optional + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + + // String slice of tags to apply to the IP Network object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Network from an IPNetworksClient and an input struct. +// Returns a populated Info struct for the IP Network, and any errors +func (c *IPNetworksClient) CreateIPNetwork(input *CreateIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPNetworkExchange = c.getQualifiedName(input.IPNetworkExchange) + + var ipInfo IPNetworkInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPNetworkInput struct { + // The name of the IP Network to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPNetworkInfo struct from an input struct +func (c *IPNetworksClient) GetIPNetwork(input *GetIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type UpdateIPNetworkInput struct { + // The name of the IP Network to update. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Specify the size of the IP Subnet. It is a range of IPv4 addresses assigned in the virtual + // network, in CIDR address prefix format. + // While specifying the IP address prefix take care of the following points: + // + //* These IP addresses aren't part of the common pool of Oracle-provided IP addresses used by the shared network. + // + //* There's no conflict with the range of IP addresses used in another IP network, the IP addresses used your on-premises network, or with the range of private IP addresses used in the shared network. If IP networks with overlapping IP subnets are linked to an IP exchange, packets going to and from those IP networks are dropped. + // + //* The upper limit of the CIDR block size for an IP network is /16. + // + //Note: The first IP address of any IP network is reserved for the default gateway, the DHCP server, and the DNS server of that IP network. + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + + //Specify the IP network exchange to which the IP network belongs. + //You can add an IP network to only one IP network exchange, but an IP network exchange + //can include multiple IP networks. 
An IP network exchange enables access between IP networks + //that have non-overlapping addresses, so that instances on these networks can exchange packets + //with each other without NAT. + // Optional + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + + // Description of the IPNetwork + // Optional + Description string `json:"description"` + + // Enable public internet access using NAPT for VNICs without any public IP reservation + // Optional + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + + // String slice of tags to apply to the IP Network object + // Optional + Tags []string `json:"tags"` +} + +func (c *IPNetworksClient) UpdateIPNetwork(input *UpdateIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPNetworkExchange = c.getQualifiedName(input.IPNetworkExchange) + + var ipInfo IPNetworkInfo + if err := c.updateResource(input.Name, &input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPNetworkInput struct { + // The name of the IP Network to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPNetworksClient) DeleteIPNetwork(input *DeleteIPNetworkInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPNetworkInfo struct +func (c *IPNetworksClient) success(info *IPNetworkInfo) (*IPNetworkInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.IPNetworkExchange) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go new file mode 100644 index 000000000..42f9f1741 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go @@ -0,0 +1,147 @@ +package compute + +// IPReservationsClient is a client for the IP Reservations functions of the Compute API. 
+type IPReservationsClient struct { + *ResourceClient +} + +const ( + IPReservationDesc = "ip reservation" + IPReservationContainerPath = "/ip/reservation/" + IPReservataionResourcePath = "/ip/reservation" +) + +// IPReservations obtains an IPReservationsClient which can be used to access to the +// IP Reservations functions of the Compute API +func (c *ComputeClient) IPReservations() *IPReservationsClient { + return &IPReservationsClient{ + ResourceClient: &ResourceClient{ + ComputeClient: c, + ResourceDescription: IPReservationDesc, + ContainerPath: IPReservationContainerPath, + ResourceRootPath: IPReservataionResourcePath, + }} +} + +type IPReservationPool string + +const ( + PublicReservationPool IPReservationPool = "/oracle/public/ippool" +) + +// IPReservationInput describes an existing IP reservation. +type IPReservation struct { + // Shows the default account for your identity domain. + Account string `json:"account"` + // Public IP address. + IP string `json:"ip"` + // The three-part name of the IP Reservation (/Compute-identity_domain/user/object). + Name string `json:"name"` + // Pool of public IP addresses + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservation. + Tags []string `json:"tags"` + // Uniform Resource Identifier + Uri string `json:"uri"` + // Is the IP reservation associated with an instance? + Used bool `json:"used"` +} + +// CreateIPReservationInput defines an IP reservation to be created. +type CreateIPReservationInput struct { + // The name of the object + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. + // Optional + Name string `json:"name"` + // Pool of public IP addresses. 
This must be set to `ippool` + // Required + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? + // Required + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservations. + // Optional + Tags []string `json:"tags"` +} + +// CreateIPReservation creates a new IP reservation with the given parentpool, tags and permanent flag. +func (c *IPReservationsClient) CreateIPReservation(input *CreateIPReservationInput) (*IPReservation, error) { + var ipInput IPReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.createResource(input, &ipInput); err != nil { + return nil, err + } + + return c.success(&ipInput) +} + +// GetIPReservationInput defines an IP Reservation to get +type GetIPReservationInput struct { + // The name of the IP Reservation + // Required + Name string +} + +// GetIPReservation retrieves the IP reservation with the given name. +func (c *IPReservationsClient) GetIPReservation(input *GetIPReservationInput) (*IPReservation, error) { + var ipInput IPReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &ipInput); err != nil { + return nil, err + } + + return c.success(&ipInput) +} + +// UpdateIPReservationInput defines an IP Reservation to be updated +type UpdateIPReservationInput struct { + // The name of the object + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. + // Required + Name string `json:"name"` + // Pool of public IP addresses. + // Required + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? 
+ // Required + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservations. + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPReservation updates the IP reservation. +func (c *IPReservationsClient) UpdateIPReservation(input *UpdateIPReservationInput) (*IPReservation, error) { + var updateOutput IPReservation + input.Name = c.getQualifiedName(input.Name) + if err := c.updateResource(input.Name, input, &updateOutput); err != nil { + return nil, err + } + return c.success(&updateOutput) +} + +// DeleteIPReservationInput defines an IP Reservation to delete +type DeleteIPReservationInput struct { + // The name of the IP Reservation + // Required + Name string +} + +// DeleteIPReservation deletes the IP reservation with the given name. +func (c *IPReservationsClient) DeleteIPReservation(input *DeleteIPReservationInput) error { + input.Name = c.getQualifiedName(input.Name) + return c.deleteResource(input.Name) +} + +func (c *IPReservationsClient) success(result *IPReservation) (*IPReservation, error) { + c.unqualify(&result.Name) + return result, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go new file mode 100644 index 000000000..bf7736a63 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go @@ -0,0 +1,143 @@ +package compute + +// MachineImagesClient is a client for the MachineImage functions of the Compute API. 
+type MachineImagesClient struct {
+	ResourceClient
+}
+
+// MachineImages obtains a MachineImagesClient which can be used to access the
+// MachineImage functions of the Compute API
+func (c *ComputeClient) MachineImages() *MachineImagesClient {
+	return &MachineImagesClient{
+		ResourceClient: ResourceClient{
+			ComputeClient:       c,
+			ResourceDescription: "MachineImage",
+			ContainerPath:       "/machineimage/",
+			ResourceRootPath:    "/machineimage",
+		}}
+}
+
+// MachineImage describes an existing Machine Image.
+type MachineImage struct {
+	// account of the associated Object Storage Classic instance
+	Account string `json:"account"`
+
+	// Dictionary of attributes to be made available to the instance
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Last time when this image was audited
+	Audited string `json:"audited"`
+
+	// Describing the image
+	Description string `json:"description"`
+
+	// Description of the state of the machine image if there is an error
+	ErrorReason string `json:"error_reason"`
+
+	// dictionary of hypervisor-specific attributes
+	Hypervisor map[string]interface{} `json:"hypervisor"`
+
+	// The format of the image
+	ImageFormat string `json:"image_format"`
+
+	// name of the machine image file uploaded to Object Storage Classic
+	File string `json:"file"`
+
+	// name of the machine image
+	Name string `json:"name"`
+
+	// Indicates that the image file is available in Object Storage Classic
+	NoUpload bool `json:"no_upload"`
+
+	// The OS platform of the image
+	Platform string `json:"platform"`
+
+	// Size values of the image file
+	Sizes map[string]interface{} `json:"sizes"`
+
+	// The state of the uploaded machine image
+	State string `json:"state"`
+
+	// Uniform Resource Identifier
+	URI string `json:"uri"`
+}
+
+// CreateMachineImageInput defines a Machine Image to be created.
+type CreateMachineImageInput struct {
+	// account of the associated Object Storage Classic instance
+	Account string `json:"account"`
+
+	// Dictionary of attributes to be made available to the instance
+	Attributes map[string]interface{} `json:"attributes,omitempty"`
+
+	// Describing the image
+	Description string `json:"description,omitempty"`
+
+	// name of the machine image file uploaded to Object Storage Classic
+	File string `json:"file,omitempty"`
+
+	// name of the machine image
+	Name string `json:"name"`
+
+	// Indicates that the image file is available in Object Storage Classic
+	NoUpload bool `json:"no_upload"`
+
+	// Size values of the image file
+	Sizes map[string]interface{} `json:"sizes"`
+}
+
+// CreateMachineImage creates a new Machine Image with the given parameters.
+func (c *MachineImagesClient) CreateMachineImage(createInput *CreateMachineImageInput) (*MachineImage, error) {
+	var machineImage MachineImage
+
+	// If `sizes` is not set then it must be defaulted to {"total": 0}
+	if createInput.Sizes == nil {
+		createInput.Sizes = map[string]interface{}{"total": 0}
+	}
+
+	// `no_upload` must always be true
+	createInput.NoUpload = true
+
+	createInput.Name = c.getQualifiedName(createInput.Name)
+	if err := c.createResource(createInput, &machineImage); err != nil {
+		return nil, err
+	}
+
+	return c.success(&machineImage)
+}
+
+// DeleteMachineImageInput describes the MachineImage to delete
+type DeleteMachineImageInput struct {
+	// The name of the MachineImage
+	Name string `json:"name"`
+}
+
+// DeleteMachineImage deletes the MachineImage with the given name.
+func (c *MachineImagesClient) DeleteMachineImage(deleteInput *DeleteMachineImageInput) error {
+	return c.deleteResource(deleteInput.Name)
+}
+
+// GetMachineImageInput describes the MachineImage to get
+type GetMachineImageInput struct {
+	// account of the associated Object Storage Classic instance
+	Account string `json:"account"`
+	// The name of the Machine Image
+	Name string `json:"name"`
+}
+
+// GetMachineImage retrieves the MachineImage with the given name.
+func (c *MachineImagesClient) GetMachineImage(getInput *GetMachineImageInput) (*MachineImage, error) {
+	getInput.Name = c.getQualifiedName(getInput.Name)
+
+	var machineImage MachineImage
+	if err := c.getResource(getInput.Name, &machineImage); err != nil {
+		return nil, err
+	}
+
+	return c.success(&machineImage)
+}
+
+func (c *MachineImagesClient) success(result *MachineImage) (*MachineImage, error) {
+	c.unqualify(&result.Name)
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go
new file mode 100644
index 000000000..684e0d45f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go
@@ -0,0 +1,451 @@
+package compute
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-oracle-terraform/client"
+)
+
+const WaitForOrchestrationActiveTimeout = time.Duration(3600 * time.Second)
+const WaitForOrchestrationDeleteTimeout = time.Duration(3600 * time.Second)
+
+// OrchestrationsClient is a client for the Orchestration functions of the Compute API.
+type OrchestrationsClient struct { + ResourceClient +} + +// Orchestrations obtains an OrchestrationsClient which can be used to access to the +// Orchestration functions of the Compute API +func (c *ComputeClient) Orchestrations() *OrchestrationsClient { + return &OrchestrationsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "Orchestration", + ContainerPath: "/platform/v1/orchestration/", + ResourceRootPath: "/platform/v1/orchestration", + }} +} + +type OrchestrationDesiredState string + +const ( + // * active: Creates all the orchestration objects defined in the orchestration. + OrchestrationDesiredStateActive OrchestrationDesiredState = "active" + // * inactive: Adds the orchestration to Oracle Compute Cloud Service, but does not create any of the orchestration + OrchestrationDesiredStateInactive OrchestrationDesiredState = "inactive" + // * suspended: Suspends all orchestration objects defined in the orchestration + OrchestrationDesiredStateSuspend OrchestrationDesiredState = "suspend" +) + +type OrchestrationStatus string + +const ( + OrchestrationStatusActive OrchestrationStatus = "active" + OrchestrationStatusInactive OrchestrationStatus = "inactive" + OrchestrationStatusSuspend OrchestrationStatus = "suspend" + OrchestrationStatusActivating OrchestrationStatus = "activating" + OrchestrationStatusDeleting OrchestrationStatus = "deleting" + OrchestrationStatusError OrchestrationStatus = "terminal_error" + OrchestrationStatusStopping OrchestrationStatus = "stopping" + OrchestrationStatusSuspending OrchestrationStatus = "suspending" + OrchestrationStatusStarting OrchestrationStatus = "starting" + OrchestrationStatusDeactivating OrchestrationStatus = "deactivating" + OrchestrationStatusSuspended OrchestrationStatus = "suspended" +) + +type OrchestrationType string + +const ( + OrchestrationTypeInstance OrchestrationType = "Instance" +) + +// OrchestrationInfo describes an existing Orchestration. 
+type Orchestration struct { + // The default Oracle Compute Cloud Service account, such as /Compute-acme/default. + Account string `json:"account"` + // Description of this orchestration + Description string `json:"description"` + // The desired_state specified in the orchestration JSON file. A unique identifier for this orchestration. + DesiredState OrchestrationDesiredState `json:"desired_state"` + // Unique identifier of this orchestration + ID string `json:"id"` + // The three-part name of the Orchestration (/Compute-identity_domain/user/object). + Name string `json:"name"` + // List of orchestration objects + Objects []Object `json:"objects"` + // Current status of this orchestration + Status OrchestrationStatus `json:"status"` + // Strings that describe the orchestration and help you identify it. + Tags []string `json:"tags"` + // Time the orchestration was last audited + TimeAudited string `json:"time_audited"` + // The time when the orchestration was added to Oracle Compute Cloud Service. + TimeCreated string `json:"time_created"` + // The time when the orchestration was last updated in Oracle Compute Cloud Service. + TimeUpdated string `json:"time_updated"` + // Unique Resource Identifier + URI string `json:"uri"` + // Name of the user who added this orchestration or made the most recent update to this orchestration. + User string `json:"user"` + // Version of this orchestration. It is automatically generated by the server. + Version int `json:"version"` +} + +// CreateOrchestrationInput defines an Orchestration to be created. +type CreateOrchestrationInput struct { + // The default Oracle Compute Cloud Service account, such as /Compute-acme/default. + // Optional + Account string `json:"account,omitempty"` + // Description of this orchestration + // Optional + Description string `json:"description,omitempty"` + // Specify the desired state of this orchestration: active, inactive, or suspend. 
+ // You can manage the state of the orchestration objects by changing the desired state of the orchestration. + // * active: Creates all the orchestration objects defined in the orchestration. + // * inactive: Adds the orchestration to Oracle Compute Cloud Service, but does not create any of the orchestration + // objects defined in the orchestration. + // Required + DesiredState OrchestrationDesiredState `json:"desired_state"` + // The three-part name of the Orchestration (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // The list of objects in the orchestration. An object is the primary building block of an orchestration. + // An orchestration can contain up to 100 objects. + // Required + Objects []Object `json:"objects"` + // Strings that describe the orchestration and help you identify it. + Tags []string `json:"tags,omitempty"` + // Version of this orchestration. It is automatically generated by the server. + Version int `json:"version,omitempty"` + // Time to wait for an orchestration to be ready + Timeout time.Duration `json:"-"` +} + +type Object struct { + // The default Oracle Compute Cloud Service account, such as /Compute-acme/default. + // Optional + Account string `json:"account,omitempty"` + // Description of this orchestration + // Optional + Description string `json:"description,omitempty"` + // The desired state of the object + // Optional + DesiredState OrchestrationDesiredState `json:"desired_state,omitempty"` + // Dictionary containing the current state of the object + Health Health `json:"health,omitempty"` + // A text string describing the object. Labels can't include spaces. In an orchestration, the label for + // each object must be unique. Maximum length is 256 characters. 
+ // Required + Label string `json:"label"` + // The four-part name of the object (/Compute-identity_domain/user/orchestration/object). If you don't specify a name + // for this object, the name is generated automatically. Object names can contain only alphanumeric characters, hyphens, + // underscores, and periods. Object names are case-sensitive. When you specify the object name, ensure that an object of + // the same type and with the same name doesn't already exist. If such a object already exists, then another + // object of the same type and with the same name won't be created and the existing object won't be updated. + // Optional + Name string `json:"name,omitempty"` + // The three-part name (/Compute-identity_domain/user/object) of the orchestration to which the object belongs. + // Required + Orchestration string `json:"orchestration"` + // Specifies whether the object should persist when the orchestration is suspended. Specify one of the following: + // * true: The object persists when the orchestration is suspended. + // * false: The object is deleted when the orchestration is suspended. + // By default, persistent is set to false. It is recommended that you specify true for storage + // volumes and other critical objects. Persistence applies only when you're suspending an orchestration. + // When you terminate an orchestration, all the objects defined in it are deleted. + // Optional + Persistent bool `json:"persistent,omitempty"` + // The relationship between the objects that are created by this orchestration. The + // only supported relationship is depends, indicating that the specified target objects must be created first. + // Note that when recovering from a failure, the orchestration doesn't consider object relationships. + // Orchestrations v2 use object references to recover interdependent objects to a healthy state. SeeObject + // References and Relationships in Using Oracle Compute Cloud Service (IaaS). 
+	Relationship []Object `json:"relationships,omitempty"`
+	// The template attribute defines the properties or characteristics of the Oracle Compute Cloud Service object
+	// that you want to create, as specified by the type attribute.
+	// The fields in the template section vary depending on the specified type. See Orchestration v2 Attributes
+	// Specific to Each Object Type in Using Oracle Compute Cloud Service (IaaS) to determine the parameters that are
+	// specific to each object type that you want to create.
+	// For example, if you want to create a storage volume, the type would be StorageVolume, and the template would include
+	// size and bootable. If you want to create an instance, the type would be Instance, and the template would include
+	// instance-specific attributes, such as imagelist and shape.
+	// Required
+	Template interface{} `json:"template"`
+	// Specify one of the following object types that you want to create.
+	// The only allowed type is Instance
+	// Required
+	Type OrchestrationType `json:"type"`
+	// Version of this object, generated by the server
+	// Optional
+	Version int `json:"version,omitempty"`
+}
+
+type Health struct {
+	// The status of the object
+	Status OrchestrationStatus `json:"status,omitempty"`
+	// What caused the status of the object
+	Cause string `json:"cause,omitempty"`
+	// The specific details for what happened to the object
+	Detail string `json:"detail,omitempty"`
+	// Any errors associated with creation of the object
+	Error string `json:"error,omitempty"`
+}
+
+// CreateOrchestration creates a new Orchestration from the given input.
+func (c *OrchestrationsClient) CreateOrchestration(input *CreateOrchestrationInput) (*Orchestration, error) { + var createdOrchestration Orchestration + + input.Name = c.getQualifiedName(input.Name) + for _, i := range input.Objects { + i.Orchestration = c.getQualifiedName(i.Orchestration) + if i.Type == OrchestrationTypeInstance { + instanceClient := c.ComputeClient.Instances() + instanceInput := i.Template.(*CreateInstanceInput) + instanceInput.Name = c.getQualifiedName(instanceInput.Name) + + qualifiedSSHKeys := []string{} + for _, key := range instanceInput.SSHKeys { + qualifiedSSHKeys = append(qualifiedSSHKeys, c.getQualifiedName(key)) + } + + instanceInput.SSHKeys = qualifiedSSHKeys + + qualifiedStorageAttachments := []StorageAttachmentInput{} + for _, attachment := range instanceInput.Storage { + qualifiedStorageAttachments = append(qualifiedStorageAttachments, StorageAttachmentInput{ + Index: attachment.Index, + Volume: c.getQualifiedName(attachment.Volume), + }) + } + instanceInput.Storage = qualifiedStorageAttachments + + instanceInput.Networking = instanceClient.qualifyNetworking(instanceInput.Networking) + } + } + + if err := c.createResource(&input, &createdOrchestration); err != nil { + return nil, err + } + + // Call wait for orchestration ready now, as creating the orchestration is an eventually consistent operation + getInput := &GetOrchestrationInput{ + Name: createdOrchestration.Name, + } + + if input.Timeout == 0 { + input.Timeout = WaitForOrchestrationActiveTimeout + } + + // Wait for orchestration to be ready and return the result + // Don't have to unqualify any objects, as the GetOrchestration method will handle that + orchestrationInfo, orchestrationError := c.WaitForOrchestrationState(getInput, input.Timeout) + if orchestrationError != nil { + deleteInput := &DeleteOrchestrationInput{ + Name: createdOrchestration.Name, + } + err := c.DeleteOrchestration(deleteInput) + if err != nil { + return nil, fmt.Errorf("Error deleting orchestration 
%s: %s", getInput.Name, err) + } + return nil, fmt.Errorf("Error creating orchestration %s: %s", getInput.Name, orchestrationError) + } + + return &orchestrationInfo, nil +} + +// GetOrchestrationInput describes the Orchestration to get +type GetOrchestrationInput struct { + // The three-part name of the Orchestration (/Compute-identity_domain/user/object). + Name string `json:name` +} + +// GetOrchestration retrieves the Orchestration with the given name. +func (c *OrchestrationsClient) GetOrchestration(input *GetOrchestrationInput) (*Orchestration, error) { + var orchestrationInfo Orchestration + if err := c.getResource(input.Name, &orchestrationInfo); err != nil { + return nil, err + } + + return c.success(&orchestrationInfo) +} + +// UpdateOrchestrationInput defines an Orchestration to be updated +type UpdateOrchestrationInput struct { + // The default Oracle Compute Cloud Service account, such as /Compute-acme/default. + // Optional + Account string `json:"account,omitempty"` + // Description of this orchestration + // Optional + Description string `json:"description,omitempty"` + // Specify the desired state of this orchestration: active, inactive, or suspend. + // You can manage the state of the orchestration objects by changing the desired state of the orchestration. + // * active: Creates all the orchestration objects defined in the orchestration. + // * inactive: Adds the orchestration to Oracle Compute Cloud Service, but does not create any of the orchestration + // objects defined in the orchestration. + // Required + DesiredState OrchestrationDesiredState `json:"desired_state"` + // The three-part name of the Orchestration (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // The list of objects in the orchestration. An object is the primary building block of an orchestration. 
+ // An orchestration can contain up to 100 objects. + // Required + Objects []Object `json:"objects"` + // Strings that describe the orchestration and help you identify it. + Tags []string `json:"tags,omitempty"` + // Version of this orchestration. It is automatically generated by the server. + Version int `json:"version,omitempty"` + // Time to wait for an orchestration to be ready + Timeout time.Duration `json:"-"` +} + +// UpdateOrchestration updates the orchestration. +func (c *OrchestrationsClient) UpdateOrchestration(input *UpdateOrchestrationInput) (*Orchestration, error) { + var updatedOrchestration Orchestration + input.Name = c.getQualifiedName(input.Name) + for _, i := range input.Objects { + i.Orchestration = c.getQualifiedName(i.Orchestration) + if i.Type == OrchestrationTypeInstance { + instanceInput := i.Template.(map[string]interface{}) + instanceInput["name"] = c.getQualifiedName(instanceInput["name"].(string)) + } + } + + if err := c.updateResource(input.Name, input, &updatedOrchestration); err != nil { + return nil, err + } + + // Call wait for orchestration ready now, as creating the orchestration is an eventually consistent operation + getInput := &GetOrchestrationInput{ + Name: updatedOrchestration.Name, + } + + if input.Timeout == 0 { + input.Timeout = WaitForOrchestrationActiveTimeout + } + + // Wait for orchestration to be ready and return the result + // Don't have to unqualify any objects, as the GetOrchestration method will handle that + orchestrationInfo, orchestrationError := c.WaitForOrchestrationState(getInput, input.Timeout) + if orchestrationError != nil { + return nil, orchestrationError + } + + return &orchestrationInfo, nil +} + +// DeleteOrchestrationInput describes the Orchestration to delete +type DeleteOrchestrationInput struct { + // The three-part name of the Orchestration (/Compute-identity_domain/user/object). 
+ // Required + Name string `json:name` + // Timeout for delete request + Timeout time.Duration `json:"-"` +} + +// DeleteOrchestration deletes the Orchestration with the given name. +func (c *OrchestrationsClient) DeleteOrchestration(input *DeleteOrchestrationInput) error { + if err := c.deleteOrchestration(input.Name); err != nil { + return err + } + + if input.Timeout == 0 { + input.Timeout = WaitForOrchestrationDeleteTimeout + } + + return c.WaitForOrchestrationDeleted(input, input.Timeout) +} + +func (c *OrchestrationsClient) success(info *Orchestration) (*Orchestration, error) { + c.unqualify(&info.Name) + for _, i := range info.Objects { + c.unqualify(&i.Orchestration) + if OrchestrationType(i.Type) == OrchestrationTypeInstance { + instanceInput := i.Template.(map[string]interface{}) + instanceInput["name"] = c.getUnqualifiedName(instanceInput["name"].(string)) + } + } + + return info, nil +} + +// WaitForOrchestrationActive waits for an orchestration to be completely initialized and available. +func (c *OrchestrationsClient) WaitForOrchestrationState(input *GetOrchestrationInput, timeout time.Duration) (Orchestration, error) { + var info *Orchestration + var getErr error + err := c.client.WaitFor("orchestration to be ready", timeout, func() (bool, error) { + info, getErr = c.GetOrchestration(input) + if getErr != nil { + return false, getErr + } + c.client.DebugLogString(fmt.Sprintf("Orchestration name is %v, Orchestration info is %+v", info.Name, info)) + switch s := info.Status; s { + case OrchestrationStatusError: + // We need to check and see if an object the orchestration is trying to create is giving us an error instead of just the orchestration as a whole. 
+ for _, object := range info.Objects { + if object.Health.Status == OrchestrationStatusError { + return false, fmt.Errorf("Error creating instance %s: %+v", object.Name, object.Health) + } + } + return false, fmt.Errorf("Error initializing orchestration: %+v", info) + case OrchestrationStatus(info.DesiredState): + c.client.DebugLogString(fmt.Sprintf("Orchestration %s", info.DesiredState)) + return true, nil + case OrchestrationStatusActivating: + c.client.DebugLogString("Orchestration activating") + return false, nil + case OrchestrationStatusStopping: + c.client.DebugLogString("Orchestration stopping") + return false, nil + case OrchestrationStatusSuspending: + c.client.DebugLogString("Orchestration suspending") + return false, nil + case OrchestrationStatusDeactivating: + c.client.DebugLogString("Orchestration deactivating") + return false, nil + case OrchestrationStatusSuspended: + c.client.DebugLogString("Orchestration suspended") + if info.DesiredState == OrchestrationDesiredStateSuspend { + return true, nil + } else { + return false, nil + } + default: + return false, fmt.Errorf("Unknown orchestration state: %s, erroring", s) + } + }) + return *info, err +} + +// WaitForOrchestrationDeleted waits for an orchestration to be fully deleted. 
+func (c *OrchestrationsClient) WaitForOrchestrationDeleted(input *DeleteOrchestrationInput, timeout time.Duration) error { + return c.client.WaitFor("orchestration to be deleted", timeout, func() (bool, error) { + var info Orchestration + if err := c.getResource(input.Name, &info); err != nil { + if client.WasNotFoundError(err) { + // Orchestration could not be found, thus deleted + return true, nil + } + // Some other error occurred trying to get Orchestration, exit + return false, err + } + switch s := info.Status; s { + case OrchestrationStatusError: + return false, fmt.Errorf("Error stopping orchestration: %+v", info) + case OrchestrationStatusStopping: + c.client.DebugLogString("Orchestration stopping") + return false, nil + case OrchestrationStatusDeleting: + c.client.DebugLogString("Orchestration deleting") + return false, nil + case OrchestrationStatusActive: + c.client.DebugLogString("Orchestration active") + return false, nil + default: + return false, fmt.Errorf("Unknown orchestration state: %s, erroring", s) + } + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go new file mode 100644 index 000000000..f46beaecf --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go @@ -0,0 +1,153 @@ +package compute + +const ( + RoutesDescription = "IP Network Route" + RoutesContainerPath = "/network/v1/route/" + RoutesResourcePath = "/network/v1/route" +) + +type RoutesClient struct { + ResourceClient +} + +func (c *ComputeClient) Routes() *RoutesClient { + return &RoutesClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: RoutesDescription, + ContainerPath: RoutesContainerPath, + ResourceRootPath: RoutesResourcePath, + }, + } +} + +type RouteInfo struct { + // Admin distance associated with this route + AdminDistance int `json:"adminDistance"` + // Description of the route + Description string 
`json:"description"` + // CIDR IPv4 Prefix associated with this route + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route + Name string `json:"name"` + // Name of the VNIC set associated with the route + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of Tags associated with the route + Tags []string `json:"tags,omitempty"` + // Uniform resource identifier associated with the route + Uri string `json:"uri"` +} + +type CreateRouteInput struct { + // Specify 0,1, or 2 as the route's administrative distance. + // If you do not specify a value, the default value is 0. + // The same prefix can be used in multiple routes. In this case, packets are routed over all the matching + // routes with the lowest administrative distance. + // In the case multiple routes with the same lowest administrative distance match, + // routing occurs over all these routes using ECMP. + // Optional + AdminDistance int `json:"adminDistance"` + // Description of the route + // Optional + Description string `json:"description"` + // The IPv4 address prefix in CIDR format, of the external network (external to the vNIC set) + // from which you want to route traffic + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route. + // Names can only contain alphanumeric, underscore, dash, and period characters. Case-sensitive + // Required + Name string `json:"name"` + // Name of the virtual NIC set to route matching packets to. 
+ // Routed flows are load-balanced among all the virtual NICs in the virtual NIC set + // Required + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of tags to be associated with the route + // Optional + Tags []string `json:"tags,omitempty"` +} + +func (c *RoutesClient) CreateRoute(input *CreateRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.NextHopVnicSet = c.getQualifiedName(input.NextHopVnicSet) + + var routeInfo RouteInfo + if err := c.createResource(&input, &routeInfo); err != nil { + return nil, err + } + + return c.success(&routeInfo) +} + +type GetRouteInput struct { + // Name of the Route to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *RoutesClient) GetRoute(input *GetRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var routeInfo RouteInfo + if err := c.getResource(input.Name, &routeInfo); err != nil { + return nil, err + } + return c.success(&routeInfo) +} + +type UpdateRouteInput struct { + // Specify 0,1, or 2 as the route's administrative distance. + // If you do not specify a value, the default value is 0. + // The same prefix can be used in multiple routes. In this case, packets are routed over all the matching + // routes with the lowest administrative distance. + // In the case multiple routes with the same lowest administrative distance match, + // routing occurs over all these routes using ECMP. + // Optional + AdminDistance int `json:"adminDistance"` + // Description of the route + // Optional + Description string `json:"description"` + // The IPv4 address prefix in CIDR format, of the external network (external to the vNIC set) + // from which you want to route traffic + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route. + // Names can only contain alphanumeric, underscore, dash, and period characters. 
Case-sensitive + // Required + Name string `json:"name"` + // Name of the virtual NIC set to route matching packets to. + // Routed flows are load-balanced among all the virtual NICs in the virtual NIC set + // Required + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of tags to be associated with the route + // Optional + Tags []string `json:"tags"` +} + +func (c *RoutesClient) UpdateRoute(input *UpdateRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.NextHopVnicSet = c.getQualifiedName(input.NextHopVnicSet) + + var routeInfo RouteInfo + if err := c.updateResource(input.Name, &input, &routeInfo); err != nil { + return nil, err + } + + return c.success(&routeInfo) +} + +type DeleteRouteInput struct { + // Name of the Route to delete. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *RoutesClient) DeleteRoute(input *DeleteRouteInput) error { + return c.deleteResource(input.Name) +} + +func (c *RoutesClient) success(info *RouteInfo) (*RouteInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.NextHopVnicSet) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go new file mode 100644 index 000000000..b45df047d --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go @@ -0,0 +1,193 @@ +package compute + +// SecRulesClient is a client for the Sec Rules functions of the Compute API. +type SecRulesClient struct { + ResourceClient +} + +// SecRules obtains a SecRulesClient which can be used to access to the +// Sec Rules functions of the Compute API +func (c *ComputeClient) SecRules() *SecRulesClient { + return &SecRulesClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "security ip list", + ContainerPath: "/secrule/", + ResourceRootPath: "/secrule", + }} +} + +// SecRuleInfo describes an existing sec rule. 
+type SecRuleInfo struct { + // Set this parameter to PERMIT. + Action string `json:"action"` + // The name of the security application + Application string `json:"application"` + // A description of the sec rule + Description string `json:"description"` + // Indicates whether the security rule is enabled + Disabled bool `json:"disabled"` + // The name of the destination security list or security IP list. + DestinationList string `json:"dst_list"` + // The name of the sec rule + Name string `json:"name"` + // The name of the source security list or security IP list. + SourceList string `json:"src_list"` + // Uniform Resource Identifier for the sec rule + URI string `json:"uri"` +} + +// CreateSecRuleInput defines a sec rule to be created. +type CreateSecRuleInput struct { + // Set this parameter to PERMIT. + // Required + Action string `json:"action"` + + // The name of the security application for user-defined or predefined security applications. + // Required + Application string `json:"application"` + + // Description of the IP Network + // Optional + Description string `json:"description"` + + // Indicates whether the sec rule is enabled (set to false) or disabled (true). + // The default setting is false. + // Optional + Disabled bool `json:"disabled"` + + // The name of the destination security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // You can specify a security IP list as the destination in a secrule, + // provided src_list is a security list that has DENY as its outbound policy. + // + // You cannot specify any of the security IP lists in the /oracle/public container + // as a destination in a secrule. + // Required + DestinationList string `json:"dst_list"` + + // The name of the Sec Rule to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. 
+	// Required
+	Name string `json:"name"`
+
+	// The name of the source security list or security IP list.
+	//
+	// You must use the prefix seclist: or seciplist: to identify the list type.
+	//
+	// Required
+	SourceList string `json:"src_list"`
+}
+
+// CreateSecRule creates a new sec rule.
+func (c *SecRulesClient) CreateSecRule(createInput *CreateSecRuleInput) (*SecRuleInfo, error) {
+	createInput.Name = c.getQualifiedName(createInput.Name)
+	createInput.SourceList = c.getQualifiedListName(createInput.SourceList)
+	createInput.DestinationList = c.getQualifiedListName(createInput.DestinationList)
+	createInput.Application = c.getQualifiedName(createInput.Application)
+
+	var ruleInfo SecRuleInfo
+	if err := c.createResource(createInput, &ruleInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&ruleInfo)
+}
+
+// GetSecRuleInput describes the Sec Rule to get
+type GetSecRuleInput struct {
+	// The name of the Sec Rule to query for
+	// Required
+	Name string `json:"name"`
+}
+
+// GetSecRule retrieves the sec rule with the given name.
+func (c *SecRulesClient) GetSecRule(getInput *GetSecRuleInput) (*SecRuleInfo, error) {
+	var ruleInfo SecRuleInfo
+	if err := c.getResource(getInput.Name, &ruleInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&ruleInfo)
+}
+
+// UpdateSecRuleInput describes a security rule to update
+type UpdateSecRuleInput struct {
+	// Set this parameter to PERMIT.
+	// Required
+	Action string `json:"action"`
+
+	// The name of the security application for user-defined or predefined security applications.
+	// Required
+	Application string `json:"application"`
+
+	// Description of the IP Network
+	// Optional
+	Description string `json:"description"`
+
+	// Indicates whether the sec rule is enabled (set to false) or disabled (true).
+	// The default setting is false.
+	// Optional
+	Disabled bool `json:"disabled"`
+
+	// The name of the destination security list or security IP list.
+ // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // You can specify a security IP list as the destination in a secrule, + // provided src_list is a security list that has DENY as its outbound policy. + // + // You cannot specify any of the security IP lists in the /oracle/public container + // as a destination in a secrule. + // Required + DestinationList string `json:"dst_list"` + + // The name of the Sec Rule to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the source security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // Required + SourceList string `json:"src_list"` +} + +// UpdateSecRule modifies the properties of the sec rule with the given name. +func (c *SecRulesClient) UpdateSecRule(updateInput *UpdateSecRuleInput) (*SecRuleInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.SourceList = c.getQualifiedListName(updateInput.SourceList) + updateInput.DestinationList = c.getQualifiedListName(updateInput.DestinationList) + updateInput.Application = c.getQualifiedName(updateInput.Application) + + var ruleInfo SecRuleInfo + if err := c.updateResource(updateInput.Name, updateInput, &ruleInfo); err != nil { + return nil, err + } + + return c.success(&ruleInfo) +} + +// DeleteSecRuleInput describes the sec rule to delete +type DeleteSecRuleInput struct { + // The name of the Sec Rule to delete. + // Required + Name string `json:"name"` +} + +// DeleteSecRule deletes the sec rule with the given name. 
+func (c *SecRulesClient) DeleteSecRule(deleteInput *DeleteSecRuleInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SecRulesClient) success(ruleInfo *SecRuleInfo) (*SecRuleInfo, error) { + ruleInfo.Name = c.getUnqualifiedName(ruleInfo.Name) + ruleInfo.SourceList = c.unqualifyListName(ruleInfo.SourceList) + ruleInfo.DestinationList = c.unqualifyListName(ruleInfo.DestinationList) + ruleInfo.Application = c.getUnqualifiedName(ruleInfo.Application) + return ruleInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go new file mode 100644 index 000000000..c473ecb83 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go @@ -0,0 +1,150 @@ +package compute + +// SecurityApplicationsClient is a client for the Security Application functions of the Compute API. +type SecurityApplicationsClient struct { + ResourceClient +} + +// SecurityApplications obtains a SecurityApplicationsClient which can be used to access to the +// Security Application functions of the Compute API +func (c *ComputeClient) SecurityApplications() *SecurityApplicationsClient { + return &SecurityApplicationsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "security application", + ContainerPath: "/secapplication/", + ResourceRootPath: "/secapplication", + }} +} + +// SecurityApplicationInfo describes an existing security application. +type SecurityApplicationInfo struct { + // A description of the security application. + Description string `json:"description"` + // The TCP or UDP destination port number. This can be a port range, such as 5900-5999 for TCP. + DPort string `json:"dport"` + // The ICMP code. + ICMPCode SecurityApplicationICMPCode `json:"icmpcode"` + // The ICMP type. 
+ ICMPType SecurityApplicationICMPType `json:"icmptype"` + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + Name string `json:"name"` + // The protocol to use. + Protocol SecurityApplicationProtocol `json:"protocol"` + // The Uniform Resource Identifier + URI string `json:"uri"` +} + +type SecurityApplicationProtocol string + +const ( + All SecurityApplicationProtocol = "all" + AH SecurityApplicationProtocol = "ah" + ESP SecurityApplicationProtocol = "esp" + ICMP SecurityApplicationProtocol = "icmp" + ICMPV6 SecurityApplicationProtocol = "icmpv6" + IGMP SecurityApplicationProtocol = "igmp" + IPIP SecurityApplicationProtocol = "ipip" + GRE SecurityApplicationProtocol = "gre" + MPLSIP SecurityApplicationProtocol = "mplsip" + OSPF SecurityApplicationProtocol = "ospf" + PIM SecurityApplicationProtocol = "pim" + RDP SecurityApplicationProtocol = "rdp" + SCTP SecurityApplicationProtocol = "sctp" + TCP SecurityApplicationProtocol = "tcp" + UDP SecurityApplicationProtocol = "udp" +) + +type SecurityApplicationICMPCode string + +const ( + Admin SecurityApplicationICMPCode = "admin" + Df SecurityApplicationICMPCode = "df" + Host SecurityApplicationICMPCode = "host" + Network SecurityApplicationICMPCode = "network" + Port SecurityApplicationICMPCode = "port" + Protocol SecurityApplicationICMPCode = "protocol" +) + +type SecurityApplicationICMPType string + +const ( + Echo SecurityApplicationICMPType = "echo" + Reply SecurityApplicationICMPType = "reply" + TTL SecurityApplicationICMPType = "ttl" + TraceRoute SecurityApplicationICMPType = "traceroute" + Unreachable SecurityApplicationICMPType = "unreachable" +) + +func (c *SecurityApplicationsClient) success(result *SecurityApplicationInfo) (*SecurityApplicationInfo, error) { + c.unqualify(&result.Name) + return result, nil +} + +// CreateSecurityApplicationInput describes the Security Application to create +type CreateSecurityApplicationInput struct { + // A description of the 
security application. + // Optional + Description string `json:"description"` + // The TCP or UDP destination port number. + // You can also specify a port range, such as 5900-5999 for TCP. + // This parameter isn't relevant to the icmp protocol. + // Required if the Protocol is TCP or UDP + DPort string `json:"dport"` + // The ICMP code. This parameter is relevant only if you specify ICMP as the protocol. + // If you specify icmp as the protocol and don't specify icmptype or icmpcode, then all ICMP packets are matched. + // Optional + ICMPCode SecurityApplicationICMPCode `json:"icmpcode,omitempty"` + // This parameter is relevant only if you specify ICMP as the protocol. + // If you specify icmp as the protocol and don't specify icmptype or icmpcode, then all ICMP packets are matched. + // Optional + ICMPType SecurityApplicationICMPType `json:"icmptype,omitempty"` + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // The protocol to use. + // Required + Protocol SecurityApplicationProtocol `json:"protocol"` +} + +// CreateSecurityApplication creates a new security application. +func (c *SecurityApplicationsClient) CreateSecurityApplication(input *CreateSecurityApplicationInput) (*SecurityApplicationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var appInfo SecurityApplicationInfo + if err := c.createResource(&input, &appInfo); err != nil { + return nil, err + } + + return c.success(&appInfo) +} + +// GetSecurityApplicationInput describes the Security Application to obtain +type GetSecurityApplicationInput struct { + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetSecurityApplication retrieves the security application with the given name. 
+func (c *SecurityApplicationsClient) GetSecurityApplication(input *GetSecurityApplicationInput) (*SecurityApplicationInfo, error) { + var appInfo SecurityApplicationInfo + if err := c.getResource(input.Name, &appInfo); err != nil { + return nil, err + } + + return c.success(&appInfo) +} + +// DeleteSecurityApplicationInput describes the Security Application to delete +type DeleteSecurityApplicationInput struct { + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteSecurityApplication deletes the security application with the given name. +func (c *SecurityApplicationsClient) DeleteSecurityApplication(input *DeleteSecurityApplicationInput) error { + return c.deleteResource(input.Name) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go new file mode 100644 index 000000000..0b806f029 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go @@ -0,0 +1,95 @@ +package compute + +// SecurityAssociationsClient is a client for the Security Association functions of the Compute API. +type SecurityAssociationsClient struct { + ResourceClient +} + +// SecurityAssociations obtains a SecurityAssociationsClient which can be used to access to the +// Security Association functions of the Compute API +func (c *ComputeClient) SecurityAssociations() *SecurityAssociationsClient { + return &SecurityAssociationsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "security association", + ContainerPath: "/secassociation/", + ResourceRootPath: "/secassociation", + }} +} + +// SecurityAssociationInfo describes an existing security association. +type SecurityAssociationInfo struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). 
+ Name string `json:"name"` + // The name of the Security List that you want to associate with the instance. + SecList string `json:"seclist"` + // vCable of the instance that you want to associate with the security list. + VCable string `json:"vcable"` + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateSecurityAssociationInput defines a security association to be created. +type CreateSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Optional + Name string `json:"name"` + // The name of the Security list that you want to associate with the instance. + // Required + SecList string `json:"seclist"` + // The name of the vCable of the instance that you want to associate with the security list. + // Required + VCable string `json:"vcable"` +} + +// CreateSecurityAssociation creates a security association between the given VCable and security list. +func (c *SecurityAssociationsClient) CreateSecurityAssociation(createInput *CreateSecurityAssociationInput) (*SecurityAssociationInfo, error) { + if createInput.Name != "" { + createInput.Name = c.getQualifiedName(createInput.Name) + } + createInput.VCable = c.getQualifiedName(createInput.VCable) + createInput.SecList = c.getQualifiedName(createInput.SecList) + + var assocInfo SecurityAssociationInfo + if err := c.createResource(&createInput, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +// GetSecurityAssociationInput describes the security association to get +type GetSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). 
+ // Required + Name string `json:"name"` +} + +// GetSecurityAssociation retrieves the security association with the given name. +func (c *SecurityAssociationsClient) GetSecurityAssociation(getInput *GetSecurityAssociationInput) (*SecurityAssociationInfo, error) { + var assocInfo SecurityAssociationInfo + if err := c.getResource(getInput.Name, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +// DeleteSecurityAssociationInput describes the security association to delete +type DeleteSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteSecurityAssociation deletes the security association with the given name. +func (c *SecurityAssociationsClient) DeleteSecurityAssociation(deleteInput *DeleteSecurityAssociationInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SecurityAssociationsClient) success(assocInfo *SecurityAssociationInfo) (*SecurityAssociationInfo, error) { + c.unqualify(&assocInfo.Name, &assocInfo.SecList, &assocInfo.VCable) + return assocInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go new file mode 100644 index 000000000..28b313374 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go @@ -0,0 +1,113 @@ +package compute + +// SecurityIPListsClient is a client for the Security IP List functions of the Compute API. 
+type SecurityIPListsClient struct { + ResourceClient +} + +// SecurityIPLists obtains a SecurityIPListsClient which can be used to access to the +// Security IP List functions of the Compute API +func (c *ComputeClient) SecurityIPLists() *SecurityIPListsClient { + return &SecurityIPListsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "security ip list", + ContainerPath: "/seciplist/", + ResourceRootPath: "/seciplist", + }} +} + +// SecurityIPListInfo describes an existing security IP list. +type SecurityIPListInfo struct { + // A description of the security IP list. + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. + SecIPEntries []string `json:"secipentries"` + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateSecurityIPListInput defines a security IP list to be created. +type CreateSecurityIPListInput struct { + // A description of the security IP list. + // Optional + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. + // Required + SecIPEntries []string `json:"secipentries"` +} + +// CreateSecurityIPList creates a security IP list with the given name and entries. 
+func (c *SecurityIPListsClient) CreateSecurityIPList(createInput *CreateSecurityIPListInput) (*SecurityIPListInfo, error) { + createInput.Name = c.getQualifiedName(createInput.Name) + var listInfo SecurityIPListInfo + if err := c.createResource(createInput, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// GetSecurityIPListInput describes the Security IP List to obtain +type GetSecurityIPListInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetSecurityIPList gets the security IP list with the given name. +func (c *SecurityIPListsClient) GetSecurityIPList(getInput *GetSecurityIPListInput) (*SecurityIPListInfo, error) { + var listInfo SecurityIPListInfo + if err := c.getResource(getInput.Name, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// UpdateSecurityIPListInput describes the security ip list to update +type UpdateSecurityIPListInput struct { + // A description of the security IP list. + // Optional + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. + // Required + SecIPEntries []string `json:"secipentries"` +} + +// UpdateSecurityIPList modifies the entries in the security IP list with the given name. +func (c *SecurityIPListsClient) UpdateSecurityIPList(updateInput *UpdateSecurityIPListInput) (*SecurityIPListInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + var listInfo SecurityIPListInfo + if err := c.updateResource(updateInput.Name, updateInput, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// DeleteSecurityIPListInput describes the security ip list to delete. 
+type DeleteSecurityIPListInput struct {
+	// The three-part name of the object (/Compute-identity_domain/user/object).
+	// Required
+	Name string `json:"name"`
+}
+
+// DeleteSecurityIPList deletes the security IP list with the given name.
+func (c *SecurityIPListsClient) DeleteSecurityIPList(deleteInput *DeleteSecurityIPListInput) error {
+	return c.deleteResource(deleteInput.Name)
+}
+
+func (c *SecurityIPListsClient) success(listInfo *SecurityIPListInfo) (*SecurityIPListInfo, error) {
+	c.unqualify(&listInfo.Name)
+	return listInfo, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go
new file mode 100644
index 000000000..3c6d87364
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go
@@ -0,0 +1,131 @@
+package compute
+
+// SecurityListsClient is a client for the Security List functions of the Compute API.
+type SecurityListsClient struct {
+	ResourceClient
+}
+
+// SecurityLists obtains a SecurityListsClient which can be used to access to the
+// Security List functions of the Compute API
+func (c *ComputeClient) SecurityLists() *SecurityListsClient {
+	return &SecurityListsClient{
+		ResourceClient: ResourceClient{
+			ComputeClient:       c,
+			ResourceDescription: "security list",
+			ContainerPath:       "/seclist/",
+			ResourceRootPath:    "/seclist",
+		}}
+}
+
+type SecurityListPolicy string
+
+const (
+	SecurityListPolicyDeny   SecurityListPolicy = "deny"
+	SecurityListPolicyReject SecurityListPolicy = "reject"
+	SecurityListPolicyPermit SecurityListPolicy = "permit"
+)
+
+// SecurityListInfo describes an existing security list.
+type SecurityListInfo struct {
+	// Shows the default account for your identity domain.
+	Account string `json:"account"`
+	// A description of the security list.
+	Description string `json:"description"`
+	// The three-part name of the security list (/Compute-identity_domain/user/object).
+	Name string `json:"name"`
+	// The policy for outbound traffic from the security list.
+	OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+	// The policy for inbound traffic to the security list
+	Policy SecurityListPolicy `json:"policy"`
+	// Uniform Resource Identifier
+	URI string `json:"uri"`
+}
+
+// CreateSecurityListInput defines a security list to be created.
+type CreateSecurityListInput struct {
+	// A description of the security list.
+	// Optional
+	Description string `json:"description"`
+	// The three-part name of the Security List (/Compute-identity_domain/user/object).
+	// Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+	// The policy for outbound traffic from the security list.
+	// Optional (defaults to `permit`)
+	OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+	// The policy for inbound traffic to the security list.
+	// Optional (defaults to `deny`)
+	Policy SecurityListPolicy `json:"policy"`
+}
+
+// CreateSecurityList creates a new security list with the given name, policy and outbound CIDR policy.
+func (c *SecurityListsClient) CreateSecurityList(createInput *CreateSecurityListInput) (*SecurityListInfo, error) {
+	createInput.Name = c.getQualifiedName(createInput.Name)
+	var listInfo SecurityListInfo
+	if err := c.createResource(createInput, &listInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&listInfo)
+}
+
+// GetSecurityListInput describes the security list you want to get
+type GetSecurityListInput struct {
+	// The three-part name of the Security List (/Compute-identity_domain/user/object).
+	// Required
+	Name string `json:"name"`
+}
+
+// GetSecurityList retrieves the security list with the given name.
+func (c *SecurityListsClient) GetSecurityList(getInput *GetSecurityListInput) (*SecurityListInfo, error) {
+	var listInfo SecurityListInfo
+	if err := c.getResource(getInput.Name, &listInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&listInfo)
+}
+
+// UpdateSecurityListInput defines what to update in a security list
+type UpdateSecurityListInput struct {
+	// A description of the security list.
+	// Optional
+	Description string `json:"description"`
+	// The three-part name of the Security List (/Compute-identity_domain/user/object).
+	// Required
+	Name string `json:"name"`
+	// The policy for outbound traffic from the security list.
+	// Optional (defaults to `permit`)
+	OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+	// The policy for inbound traffic to the security list.
+	// Optional (defaults to `deny`)
+	Policy SecurityListPolicy `json:"policy"`
+}
+
+// UpdateSecurityList updates the policy and outbound CIDR policy of the security list with the given name.
+func (c *SecurityListsClient) UpdateSecurityList(updateInput *UpdateSecurityListInput) (*SecurityListInfo, error) {
+	updateInput.Name = c.getQualifiedName(updateInput.Name)
+	var listInfo SecurityListInfo
+	if err := c.updateResource(updateInput.Name, updateInput, &listInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&listInfo)
+}
+
+// DeleteSecurityListInput describes the security list to destroy
+type DeleteSecurityListInput struct {
+	// The three-part name of the Security List (/Compute-identity_domain/user/object).
+	// Required
+	Name string `json:"name"`
+}
+
+// DeleteSecurityList deletes the security list with the given name.
+func (c *SecurityListsClient) DeleteSecurityList(deleteInput *DeleteSecurityListInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SecurityListsClient) success(listInfo *SecurityListInfo) (*SecurityListInfo, error) { + c.unqualify(&listInfo.Name) + return listInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go new file mode 100644 index 000000000..406be9cad --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go @@ -0,0 +1,187 @@ +package compute + +const ( + SecurityProtocolDescription = "security protocol" + SecurityProtocolContainerPath = "/network/v1/secprotocol/" + SecurityProtocolResourcePath = "/network/v1/secprotocol" +) + +type SecurityProtocolsClient struct { + ResourceClient +} + +// SecurityProtocols() returns an SecurityProtocolsClient that can be used to access the +// necessary CRUD functions for Security Protocols. +func (c *ComputeClient) SecurityProtocols() *SecurityProtocolsClient { + return &SecurityProtocolsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: SecurityProtocolDescription, + ContainerPath: SecurityProtocolContainerPath, + ResourceRootPath: SecurityProtocolResourcePath, + }, + } +} + +// SecurityProtocolInfo contains the exported fields necessary to hold all the information about an +// Security Protocol +type SecurityProtocolInfo struct { + // List of port numbers or port range strings to match the packet's destination port. + DstPortSet []string `json:"dstPortSet"` + // Protocol used in the data portion of the IP datagram. + IPProtocol string `json:"ipProtocol"` + // List of port numbers or port range strings to match the packet's source port. 
+ SrcPortSet []string `json:"srcPortSet"` + // The name of the Security Protocol + Name string `json:"name"` + // Description of the Security Protocol + Description string `json:"description"` + // Slice of tags associated with the Security Protocol + Tags []string `json:"tags"` + // Uniform Resource Identifier for the Security Protocol + Uri string `json:"uri"` +} + +type CreateSecurityProtocolInput struct { + // The name of the Security Protocol to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the SecurityProtocol + // Optional + Description string `json:"description"` + + // Enter a list of port numbers or port range strings. + //Traffic is enabled by a security rule when a packet's destination port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535, + // inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no destination ports are specified, all destination ports or ICMP types are allowed. + // Optional + DstPortSet []string `json:"dstPortSet"` + + // The protocol used in the data portion of the IP datagram. + // Specify one of the permitted values or enter a number in the range 0–254 to + // represent the protocol that you want to specify. See Assigned Internet Protocol Numbers. + // Permitted values are: tcp, udp, icmp, igmp, ipip, rdp, esp, ah, gre, icmpv6, ospf, pim, sctp, + // mplsip, all. + // Traffic is enabled by a security rule when the protocol in the packet matches the + // protocol specified here. If no protocol is specified, all protocols are allowed. + // Optional + IPProtocol string `json:"ipProtocol"` + + // Enter a list of port numbers or port range strings. + // Traffic is enabled by a security rule when a packet's source port matches the + // ports specified here. 
+ // For TCP, SCTP, and UDP, each port is a source transport port, + // between 0 and 65535, inclusive. + // For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no source ports are specified, all source ports or ICMP types are allowed. + // Optional + SrcPortSet []string `json:"srcPortSet"` + + // String slice of tags to apply to the Security Protocol object + // Optional + Tags []string `json:"tags"` +} + +// Create a new Security Protocol from an SecurityProtocolsClient and an input struct. +// Returns a populated Info struct for the Security Protocol, and any errors +func (c *SecurityProtocolsClient) CreateSecurityProtocol(input *CreateSecurityProtocolInput) (*SecurityProtocolInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo SecurityProtocolInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetSecurityProtocolInput struct { + // The name of the Security Protocol to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated SecurityProtocolInfo struct from an input struct +func (c *SecurityProtocolsClient) GetSecurityProtocol(input *GetSecurityProtocolInput) (*SecurityProtocolInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo SecurityProtocolInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateSecurityProtocolInput defines what to update in a security protocol +type UpdateSecurityProtocolInput struct { + // The name of the Security Protocol to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the SecurityProtocol + // Optional + Description string `json:"description"` + + // Enter a list of port numbers or port range strings. 
+ //Traffic is enabled by a security rule when a packet's destination port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535, + // inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no destination ports are specified, all destination ports or ICMP types are allowed. + DstPortSet []string `json:"dstPortSet"` + + // The protocol used in the data portion of the IP datagram. + // Specify one of the permitted values or enter a number in the range 0–254 to + // represent the protocol that you want to specify. See Assigned Internet Protocol Numbers. + // Permitted values are: tcp, udp, icmp, igmp, ipip, rdp, esp, ah, gre, icmpv6, ospf, pim, sctp, + // mplsip, all. + // Traffic is enabled by a security rule when the protocol in the packet matches the + // protocol specified here. If no protocol is specified, all protocols are allowed. + IPProtocol string `json:"ipProtocol"` + + // Enter a list of port numbers or port range strings. + // Traffic is enabled by a security rule when a packet's source port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a source transport port, + // between 0 and 65535, inclusive. + // For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no source ports are specified, all source ports or ICMP types are allowed. 
+ SrcPortSet []string `json:"srcPortSet"` + + // String slice of tags to apply to the Security Protocol object + // Optional + Tags []string `json:"tags"` +} + +// UpdateSecurityProtocol update the security protocol +func (c *SecurityProtocolsClient) UpdateSecurityProtocol(updateInput *UpdateSecurityProtocolInput) (*SecurityProtocolInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + var ipInfo SecurityProtocolInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteSecurityProtocolInput struct { + // The name of the Security Protocol to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *SecurityProtocolsClient) DeleteSecurityProtocol(input *DeleteSecurityProtocolInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the SecurityProtocolInfo struct +func (c *SecurityProtocolsClient) success(info *SecurityProtocolInfo) (*SecurityProtocolInfo, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go new file mode 100644 index 000000000..1773ea8b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go @@ -0,0 +1,266 @@ +package compute + +const ( + SecurityRuleDescription = "security rules" + SecurityRuleContainerPath = "/network/v1/secrule/" + SecurityRuleResourcePath = "/network/v1/secrule" +) + +type SecurityRuleClient struct { + ResourceClient +} + +// SecurityRules() returns an SecurityRulesClient that can be used to access the +// necessary CRUD functions for Security Rules. 
func (c *ComputeClient) SecurityRules() *SecurityRuleClient {
	return &SecurityRuleClient{
		ResourceClient: ResourceClient{
			ComputeClient:       c,
			ResourceDescription: SecurityRuleDescription,
			ContainerPath:       SecurityRuleContainerPath,
			ResourceRootPath:    SecurityRuleResourcePath,
		},
	}
}

// SecurityRuleInfo contains the exported fields necessary to hold all the
// information about a Security Rule.
type SecurityRuleInfo struct {
	// Name of the ACL that contains this rule.
	ACL string `json:"acl"`
	// Description of the Security Rule.
	Description string `json:"description"`
	// List of IP address prefix set names to match the packet's destination IP address.
	DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`
	// Name of virtual NIC set containing the packet's destination virtual NIC.
	DstVnicSet string `json:"dstVnicSet"`
	// Allows the security rule to be disabled.
	Enabled bool `json:"enabledFlag"`
	// Direction of the flow; can be "egress" or "ingress".
	// NOTE(review): this tag is capitalized ("FlowDirection") while
	// CreateSecurityRuleInput/UpdateSecurityRuleInput use "flowDirection" —
	// looks inconsistent; confirm against the actual API response shape
	// before changing, since it may mirror the service's response field.
	FlowDirection string `json:"FlowDirection"`
	// The name of the Security Rule.
	Name string `json:"name"`
	// List of security protocol names to match the packet's protocol and port.
	SecProtocols []string `json:"secProtocols"`
	// List of multipart names of IP address prefix set to match the packet's source IP address.
	SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"`
	// Name of virtual NIC set containing the packet's source virtual NIC.
	SrcVnicSet string `json:"srcVnicSet"`
	// Slice of tags associated with the Security Rule.
	Tags []string `json:"tags"`
	// Uniform Resource Identifier for the Security Rule.
	Uri string `json:"uri"`
}

// CreateSecurityRuleInput defines the attributes used to create a new
// Security Rule.
type CreateSecurityRuleInput struct {
	// Select the name of the access control list (ACL) that you want to add this
	// security rule to. Security rules are applied to vNIC sets by using ACLs.
	// Optional
	ACL string `json:"acl,omitempty"`

	// Description of the Security Rule
	// Optional
	Description string `json:"description"`

	// A list of IP address prefix sets to which you want to permit traffic.
	// Only packets to IP addresses in the specified IP address prefix sets are permitted.
	// When no destination IP address prefix sets are specified, traffic to any
	// IP address is permitted.
	// Optional
	DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`

	// The vNICset to which you want to permit traffic. Only packets to vNICs in the
	// specified vNICset are permitted. When no destination vNICset is specified, traffic
	// to any vNIC is permitted.
	// Optional
	DstVnicSet string `json:"dstVnicSet,omitempty"`

	// Allows the security rule to be enabled or disabled. This parameter is set to
	// true by default. Specify false to disable the security rule.
	// Optional
	Enabled bool `json:"enabledFlag"`

	// Specify the direction of flow of traffic, which is relative to the instances,
	// for this security rule. Allowed values are ingress or egress.
	// An ingress packet is a packet received by a virtual NIC, for example from
	// another virtual NIC or from the public Internet.
	// An egress packet is a packet sent by a virtual NIC, for example to another
	// virtual NIC or to the public Internet.
	// Required
	FlowDirection string `json:"flowDirection"`

	// The name of the Security Rule
	// Object names can contain only alphanumeric characters, hyphens, underscores, and periods.
	// Object names are case-sensitive. When you specify the object name, ensure that an object
	// of the same type and with the same name doesn't already exist.
	// If such an object already exists, another object of the same type and with the same name won't
	// be created and the existing object won't be updated.
	// Required
	Name string `json:"name"`

	// A list of security protocols for which you want to permit traffic. Only packets that
	// match the specified protocols and ports are permitted. When no security protocols are
	// specified, traffic using any protocol over any port is permitted.
	// Optional
	SecProtocols []string `json:"secProtocols"`

	// A list of IP address prefix sets from which you want to permit traffic. Only packets
	// from IP addresses in the specified IP address prefix sets are permitted. When no source
	// IP address prefix sets are specified, traffic from any IP address is permitted.
	// Optional
	SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"`

	// The vNICset from which you want to permit traffic. Only packets from vNICs in the
	// specified vNICset are permitted. When no source vNICset is specified, traffic from any
	// vNIC is permitted.
	// Optional
	SrcVnicSet string `json:"srcVnicSet,omitempty"`

	// Strings that you can use to tag the security rule.
	// Optional
	Tags []string `json:"tags"`
}

// Create a new Security Rule from a SecurityRuleClient and an input struct.
// Returns a populated Info struct for the Security Rule, and any errors.
func (c *SecurityRuleClient) CreateSecurityRule(input *CreateSecurityRuleInput) (*SecurityRuleInfo, error) {
	// Expand all user-supplied names to their fully qualified
	// (/Compute-identity_domain/user/object) form before sending the request.
	input.Name = c.getQualifiedName(input.Name)
	input.ACL = c.getQualifiedName(input.ACL)
	input.SrcVnicSet = c.getQualifiedName(input.SrcVnicSet)
	input.DstVnicSet = c.getQualifiedName(input.DstVnicSet)
	input.SrcIpAddressPrefixSets = c.getQualifiedList(input.SrcIpAddressPrefixSets)
	input.DstIpAddressPrefixSets = c.getQualifiedList(input.DstIpAddressPrefixSets)
	input.SecProtocols = c.getQualifiedList(input.SecProtocols)

	var securityRuleInfo SecurityRuleInfo
	// NOTE(review): &input here is a **CreateSecurityRuleInput (pointer to
	// pointer). This matches the pattern used throughout this package, so it
	// is presumably handled by createResource's serialization — confirm
	// before "fixing".
	if err := c.createResource(&input, &securityRuleInfo); err != nil {
		return nil, err
	}

	// success unqualifies the multipart names in the returned info.
	return c.success(&securityRuleInfo)
}

// GetSecurityRuleInput specifies which Security Rule to fetch.
type GetSecurityRuleInput struct {
	// The name of the Security Rule to query for. Case-sensitive
	// Required
	Name string `json:"name"`
}

// Returns a populated SecurityRuleInfo struct from an input struct.
func (c *SecurityRuleClient) GetSecurityRule(input *GetSecurityRuleInput) (*SecurityRuleInfo, error) {
	input.Name = c.getQualifiedName(input.Name)

	var securityRuleInfo SecurityRuleInfo
	if err := c.getResource(input.Name, &securityRuleInfo); err != nil {
		return nil, err
	}

	return c.success(&securityRuleInfo)
}

// UpdateSecurityRuleInput describes a security rule to update.
type UpdateSecurityRuleInput struct {
	// Select the name of the access control list (ACL) that you want to add this
	// security rule to. Security rules are applied to vNIC sets by using ACLs.
	// Optional
	ACL string `json:"acl,omitempty"`

	// Description of the Security Rule
	// Optional
	Description string `json:"description"`

	// A list of IP address prefix sets to which you want to permit traffic.
	// Only packets to IP addresses in the specified IP address prefix sets are permitted.
	// When no destination IP address prefix sets are specified, traffic to any
	// IP address is permitted.
	// Optional
	DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`

	// The vNICset to which you want to permit traffic. Only packets to vNICs in the
	// specified vNICset are permitted. When no destination vNICset is specified, traffic
	// to any vNIC is permitted.
	// Optional
	DstVnicSet string `json:"dstVnicSet,omitempty"`

	// Allows the security rule to be enabled or disabled. This parameter is set to
	// true by default. Specify false to disable the security rule.
	// Optional
	Enabled bool `json:"enabledFlag"`

	// Specify the direction of flow of traffic, which is relative to the instances,
	// for this security rule. Allowed values are ingress or egress.
	// An ingress packet is a packet received by a virtual NIC, for example from
	// another virtual NIC or from the public Internet.
	// An egress packet is a packet sent by a virtual NIC, for example to another
	// virtual NIC or to the public Internet.
	// Required
	FlowDirection string `json:"flowDirection"`

	// The name of the Security Rule
	// Object names can contain only alphanumeric characters, hyphens, underscores, and periods.
	// Object names are case-sensitive. When you specify the object name, ensure that an object
	// of the same type and with the same name doesn't already exist.
	// If such an object already exists, another object of the same type and with the same name won't
	// be created and the existing object won't be updated.
	// Required
	Name string `json:"name"`

	// A list of security protocols for which you want to permit traffic. Only packets that
	// match the specified protocols and ports are permitted. When no security protocols are
	// specified, traffic using any protocol over any port is permitted.
+ // Optional + SecProtocols []string `json:"secProtocols"` + + // A list of IP address prefix sets from which you want to permit traffic. Only packets + // from IP addresses in the specified IP address prefix sets are permitted. When no source + // IP address prefix sets are specified, traffic from any IP address is permitted. + // Optional + SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"` + + // The vNICset from which you want to permit traffic. Only packets from vNICs in the + // specified vNICset are permitted. When no source vNICset is specified, traffic from any + // vNIC is permitted. + // Optional + SrcVnicSet string `json:"srcVnicSet,omitempty"` + + // Strings that you can use to tag the security rule. + // Optional + Tags []string `json:"tags"` +} + +// UpdateSecRule modifies the properties of the sec rule with the given name. +func (c *SecurityRuleClient) UpdateSecurityRule(updateInput *UpdateSecurityRuleInput) (*SecurityRuleInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.ACL = c.getQualifiedName(updateInput.ACL) + updateInput.SrcVnicSet = c.getQualifiedName(updateInput.SrcVnicSet) + updateInput.DstVnicSet = c.getQualifiedName(updateInput.DstVnicSet) + updateInput.SrcIpAddressPrefixSets = c.getQualifiedList(updateInput.SrcIpAddressPrefixSets) + updateInput.DstIpAddressPrefixSets = c.getQualifiedList(updateInput.DstIpAddressPrefixSets) + updateInput.SecProtocols = c.getQualifiedList(updateInput.SecProtocols) + + var securityRuleInfo SecurityRuleInfo + if err := c.updateResource(updateInput.Name, updateInput, &securityRuleInfo); err != nil { + return nil, err + } + + return c.success(&securityRuleInfo) +} + +type DeleteSecurityRuleInput struct { + // The name of the Security Rule to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *SecurityRuleClient) DeleteSecurityRule(input *DeleteSecurityRuleInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPNetworkExchangeInfo struct +func (c *SecurityRuleClient) success(info *SecurityRuleInfo) (*SecurityRuleInfo, error) { + c.unqualify(&info.Name, &info.ACL, &info.SrcVnicSet, &info.DstVnicSet) + info.SrcIpAddressPrefixSets = c.getUnqualifiedList(info.SrcIpAddressPrefixSets) + info.DstIpAddressPrefixSets = c.getUnqualifiedList(info.DstIpAddressPrefixSets) + info.SecProtocols = c.getUnqualifiedList(info.SecProtocols) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go new file mode 100644 index 000000000..d2a9616e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go @@ -0,0 +1,217 @@ +package compute + +import ( + "fmt" + "time" +) + +const WaitForSnapshotCompleteTimeout = time.Duration(600 * time.Second) + +// SnapshotsClient is a client for the Snapshot functions of the Compute API. +type SnapshotsClient struct { + ResourceClient +} + +// Snapshots obtains an SnapshotsClient which can be used to access to the +// Snapshot functions of the Compute API +func (c *ComputeClient) Snapshots() *SnapshotsClient { + return &SnapshotsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "Snapshot", + ContainerPath: "/snapshot/", + ResourceRootPath: "/snapshot", + }} +} + +type SnapshotState string + +const ( + SnapshotActive SnapshotState = "active" + SnapshotComplete SnapshotState = "complete" + SnapshotQueued SnapshotState = "queued" + SnapshotError SnapshotState = "error" +) + +type SnapshotDelay string + +const ( + SnapshotDelayShutdown SnapshotDelay = "shutdown" +) + +// SnapshotInfo describes an existing Snapshot. 
+type Snapshot struct { + // Shows the default account for your identity domain. + Account string `json:"account"` + // Timestamp when this request was created. + CreationTime string `json:"creation_time"` + // Snapshot of the instance is not taken immediately. + Delay SnapshotDelay `json:"delay"` + // A description of the reason this request entered "error" state. + ErrorReason string `json:"error_reason"` + // Name of the instance + Instance string `json:"instance"` + // Name of the machine image generated from the instance snapshot request. + MachineImage string `json:"machineimage"` + // Name of the instance snapshot request. + Name string `json:"name"` + // Not used + Quota string `json:"quota"` + // The state of the request. + State SnapshotState `json:"state"` + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateSnapshotInput defines an Snapshot to be created. +type CreateSnapshotInput struct { + // The name of the account that contains the credentials and access details of + // Oracle Storage Cloud Service. The machine image file is uploaded to the Oracle + // Storage Cloud Service account that you specify. + // Optional + Account string `json:"account,omitempty"` + // Use this option when you want to preserve the custom changes you have made + // to an instance before deleting the instance. The only permitted value is shutdown. + // Snapshot of the instance is not taken immediately. It creates a machine image which + // preserves the changes you have made to the instance, and then the instance is deleted. + // Note: This option has no effect if you shutdown the instance from inside it. Any pending + // snapshot request on that instance goes into error state. You must delete the instance + // (DELETE /instance/{name}). + // Optional + Delay SnapshotDelay `json:"delay,omitempty"` + // Name of the instance that you want to clone. 
+ // Required + Instance string `json:"instance"` + // Specify the name of the machine image created by the snapshot request. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. + // If you don't specify a name for this object, then the name is generated automatically. + // Optional + MachineImage string `json:"machineimage,omitempty"` + // Time to wait for snapshot to be completed + Timeout time.Duration `json:"-"` +} + +// CreateSnapshot creates a new Snapshot +func (c *SnapshotsClient) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + input.Account = c.getQualifiedACMEName(input.Account) + input.Instance = c.getQualifiedName(input.Instance) + input.MachineImage = c.getQualifiedName(input.MachineImage) + + var snapshotInfo Snapshot + if err := c.createResource(&input, &snapshotInfo); err != nil { + return nil, err + } + + // Call wait for snapshot complete now, as creating the snashot is an eventually consistent operation + getInput := &GetSnapshotInput{ + Name: snapshotInfo.Name, + } + + if input.Timeout == 0 { + input.Timeout = WaitForSnapshotCompleteTimeout + } + + // Wait for snapshot to be complete and return the result + return c.WaitForSnapshotComplete(getInput, input.Timeout) +} + +// GetSnapshotInput describes the snapshot to get +type GetSnapshotInput struct { + // The name of the Snapshot + // Required + Name string `json:name` +} + +// GetSnapshot retrieves the Snapshot with the given name. 
+func (c *SnapshotsClient) GetSnapshot(getInput *GetSnapshotInput) (*Snapshot, error) { + getInput.Name = c.getQualifiedName(getInput.Name) + var snapshotInfo Snapshot + if err := c.getResource(getInput.Name, &snapshotInfo); err != nil { + return nil, err + } + + return c.success(&snapshotInfo) +} + +// DeleteSnapshotInput describes the snapshot to delete +type DeleteSnapshotInput struct { + // The name of the Snapshot + // Required + Snapshot string + // The name of the machine image + // Required + MachineImage string + // Time to wait for snapshot to be deleted + Timeout time.Duration +} + +// DeleteSnapshot deletes the Snapshot with the given name. +// A machine image gets created with the associated snapshot and needs to be deleted as well. +func (c *SnapshotsClient) DeleteSnapshot(machineImagesClient *MachineImagesClient, input *DeleteSnapshotInput) error { + // Wait for snapshot complete in case delay is active and the corresponding instance needs to be deleted first + getInput := &GetSnapshotInput{ + Name: input.Snapshot, + } + + if input.Timeout == 0 { + input.Timeout = WaitForSnapshotCompleteTimeout + } + + if _, err := c.WaitForSnapshotComplete(getInput, input.Timeout); err != nil { + return fmt.Errorf("Could not delete snapshot: %s", err) + } + + if err := c.deleteResource(input.Snapshot); err != nil { + return fmt.Errorf("Could not delete snapshot: %s", err) + } + + deleteMachineImageRequest := &DeleteMachineImageInput{ + Name: input.MachineImage, + } + if err := machineImagesClient.DeleteMachineImage(deleteMachineImageRequest); err != nil { + return fmt.Errorf("Could not delete machine image associated with snapshot: %s", err) + } + + return nil +} + +// WaitForSnapshotComplete waits for an snapshot to be completely initialized and available. 
+func (c *SnapshotsClient) WaitForSnapshotComplete(input *GetSnapshotInput, timeout time.Duration) (*Snapshot, error) { + var info *Snapshot + var getErr error + err := c.client.WaitFor("snapshot to be complete", timeout, func() (bool, error) { + info, getErr = c.GetSnapshot(input) + if getErr != nil { + return false, getErr + } + switch s := info.State; s { + case SnapshotError: + return false, fmt.Errorf("Error initializing snapshot: %s", info.ErrorReason) + case SnapshotComplete: + c.client.DebugLogString("Snapshot Complete") + return true, nil + case SnapshotQueued: + c.client.DebugLogString("Snapshot Queuing") + return false, nil + case SnapshotActive: + c.client.DebugLogString("Snapshot Active") + if info.Delay == SnapshotDelayShutdown { + return true, nil + } + return false, nil + default: + c.client.DebugLogString(fmt.Sprintf("Unknown snapshot state: %s, waiting", s)) + return false, nil + } + }) + return info, err +} + +func (c *SnapshotsClient) success(snapshotInfo *Snapshot) (*Snapshot, error) { + c.unqualify(&snapshotInfo.Account) + c.unqualify(&snapshotInfo.Instance) + c.unqualify(&snapshotInfo.MachineImage) + c.unqualify(&snapshotInfo.Name) + return snapshotInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go new file mode 100644 index 000000000..7e8be20ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go @@ -0,0 +1,124 @@ +package compute + +// SSHKeysClient is a client for the SSH key functions of the Compute API. 
+type SSHKeysClient struct { + ResourceClient +} + +// SSHKeys obtains an SSHKeysClient which can be used to access to the +// SSH key functions of the Compute API +func (c *ComputeClient) SSHKeys() *SSHKeysClient { + return &SSHKeysClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "SSH key", + ContainerPath: "/sshkey/", + ResourceRootPath: "/sshkey", + }} +} + +// SSHKeyInfo describes an existing SSH key. +type SSHKey struct { + // Indicates whether the key is enabled (true) or disabled. + Enabled bool `json:"enabled"` + // The SSH public key value. + Key string `json:"key"` + // The three-part name of the SSH Key (/Compute-identity_domain/user/object). + Name string `json:"name"` + // Unique Resource Identifier + URI string `json:"uri"` +} + +// CreateSSHKeyInput defines an SSH key to be created. +type CreateSSHKeyInput struct { + // The three-part name of the SSH Key (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // The SSH public key value. + // Required + Key string `json:"key"` + // Indicates whether the key must be enabled (default) or disabled. Note that disabled keys cannot be associated with instances. + // To explicitly enable the key, specify true. To disable the key, specify false. + // Optional + Enabled bool `json:"enabled"` +} + +// CreateSSHKey creates a new SSH key with the given name, key and enabled flag. 
+func (c *SSHKeysClient) CreateSSHKey(createInput *CreateSSHKeyInput) (*SSHKey, error) { + var keyInfo SSHKey + // We have to update after create to get the full ssh key into opc + updateSSHKeyInput := UpdateSSHKeyInput{ + Name: createInput.Name, + Key: createInput.Key, + Enabled: createInput.Enabled, + } + + createInput.Name = c.getQualifiedName(createInput.Name) + if err := c.createResource(&createInput, &keyInfo); err != nil { + return nil, err + } + + _, err := c.UpdateSSHKey(&updateSSHKeyInput) + if err != nil { + return nil, err + } + + return c.success(&keyInfo) +} + +// GetSSHKeyInput describes the ssh key to get +type GetSSHKeyInput struct { + // The three-part name of the SSH Key (/Compute-identity_domain/user/object). + Name string `json:name` +} + +// GetSSHKey retrieves the SSH key with the given name. +func (c *SSHKeysClient) GetSSHKey(getInput *GetSSHKeyInput) (*SSHKey, error) { + var keyInfo SSHKey + if err := c.getResource(getInput.Name, &keyInfo); err != nil { + return nil, err + } + + return c.success(&keyInfo) +} + +// UpdateSSHKeyInput defines an SSH key to be updated +type UpdateSSHKeyInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + // The SSH public key value. + // Required + Key string `json:"key"` + // Indicates whether the key must be enabled (default) or disabled. Note that disabled keys cannot be associated with instances. + // To explicitly enable the key, specify true. To disable the key, specify false. + // Optional + // TODO/NOTE: isn't this required? + Enabled bool `json:"enabled"` +} + +// UpdateSSHKey updates the key and enabled flag of the SSH key with the given name. 
+func (c *SSHKeysClient) UpdateSSHKey(updateInput *UpdateSSHKeyInput) (*SSHKey, error) { + var keyInfo SSHKey + updateInput.Name = c.getQualifiedName(updateInput.Name) + if err := c.updateResource(updateInput.Name, updateInput, &keyInfo); err != nil { + return nil, err + } + return c.success(&keyInfo) +} + +// DeleteKeyInput describes the ssh key to delete +type DeleteSSHKeyInput struct { + // The three-part name of the SSH Key (/Compute-identity_domain/user/object). + Name string `json:name` +} + +// DeleteSSHKey deletes the SSH key with the given name. +func (c *SSHKeysClient) DeleteSSHKey(deleteInput *DeleteSSHKeyInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SSHKeysClient) success(keyInfo *SSHKey) (*SSHKey, error) { + c.unqualify(&keyInfo.Name) + return keyInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go new file mode 100644 index 000000000..51e18af47 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go @@ -0,0 +1,178 @@ +package compute + +import ( + "time" + + "github.com/hashicorp/go-oracle-terraform/client" +) + +const WaitForVolumeAttachmentDeleteTimeout = time.Duration(30 * time.Second) +const WaitForVolumeAttachmentReadyTimeout = time.Duration(30 * time.Second) + +// StorageAttachmentsClient is a client for the Storage Attachment functions of the Compute API. 
type StorageAttachmentsClient struct {
	ResourceClient
}

// StorageAttachments obtains a StorageAttachmentsClient which can be used to access to the
// Storage Attachment functions of the Compute API.
func (c *ComputeClient) StorageAttachments() *StorageAttachmentsClient {
	return &StorageAttachmentsClient{
		ResourceClient: ResourceClient{
			ComputeClient:       c,
			ResourceDescription: "storage volume attachment",
			ContainerPath:       "/storage/attachment/",
			ResourceRootPath:    "/storage/attachment",
		}}
}

// StorageAttachmentState represents the lifecycle state of a storage attachment.
type StorageAttachmentState string

const (
	Attaching   StorageAttachmentState = "attaching"
	Attached    StorageAttachmentState = "attached"
	Detaching   StorageAttachmentState = "detaching"
	Unavailable StorageAttachmentState = "unavailable"
	Unknown     StorageAttachmentState = "unknown"
)

// StorageAttachmentInfo describes an existing storage attachment.
type StorageAttachmentInfo struct {
	// Name of this attachment, generated by the server.
	Name string `json:"name"`

	// Index number for the volume. The allowed range is 1-10
	// An attachment with index 1 is exposed to the instance as /dev/xvdb, an attachment with index 2 is exposed as /dev/xvdc, and so on.
	Index int `json:"index"`

	// Multipart name of the instance attached to the storage volume.
	InstanceName string `json:"instance_name"`

	// Multipart name of the volume attached to the instance.
	StorageVolumeName string `json:"storage_volume_name"`

	// The State of the Storage Attachment
	State StorageAttachmentState `json:"state"`
}

// success strips the identity-domain qualification from the multipart names
// before handing the info back to the caller.
func (c *StorageAttachmentsClient) success(attachmentInfo *StorageAttachmentInfo) (*StorageAttachmentInfo, error) {
	c.unqualify(&attachmentInfo.Name, &attachmentInfo.InstanceName, &attachmentInfo.StorageVolumeName)
	return attachmentInfo, nil
}

// CreateStorageAttachmentInput defines the attributes needed to attach a
// storage volume to an instance.
type CreateStorageAttachmentInput struct {
	// Index number for the volume. The allowed range is 1-10
	// An attachment with index 1 is exposed to the instance as /dev/xvdb, an attachment with index 2 is exposed as /dev/xvdc, and so on.
	// Required.
	Index int `json:"index"`

	// Multipart name of the instance to which you want to attach the volume.
	// Required.
	InstanceName string `json:"instance_name"`

	// Multipart name of the volume that you want to attach.
	// Required.
	StorageVolumeName string `json:"storage_volume_name"`

	// Time to wait for storage volume attachment.
	Timeout time.Duration `json:"-"`
}

// CreateStorageAttachment creates a storage attachment attaching the given volume to the given instance at the given index.
func (c *StorageAttachmentsClient) CreateStorageAttachment(input *CreateStorageAttachmentInput) (*StorageAttachmentInfo, error) {
	// NOTE(review): only InstanceName is qualified here; StorageVolumeName is
	// sent as supplied — confirm whether that is intentional.
	input.InstanceName = c.getQualifiedName(input.InstanceName)

	var attachmentInfo *StorageAttachmentInfo
	if err := c.createResource(&input, &attachmentInfo); err != nil {
		return nil, err
	}

	if input.Timeout == 0 {
		input.Timeout = WaitForVolumeAttachmentReadyTimeout
	}

	// Attachment is asynchronous; block until it reports "attached".
	return c.waitForStorageAttachmentToFullyAttach(attachmentInfo.Name, input.Timeout)
}

// DeleteStorageAttachmentInput represents the body of an API request to delete a Storage Attachment.
type DeleteStorageAttachmentInput struct {
	// The three-part name of the Storage Attachment (/Compute-identity_domain/user/object).
	// Required
	Name string `json:"name"`

	// Time to wait for storage volume snapshot
	Timeout time.Duration `json:"-"`
}

// DeleteStorageAttachment deletes the storage attachment with the given name.
+func (c *StorageAttachmentsClient) DeleteStorageAttachment(input *DeleteStorageAttachmentInput) error { + if err := c.deleteResource(input.Name); err != nil { + return err + } + + if input.Timeout == 0 { + input.Timeout = WaitForVolumeAttachmentDeleteTimeout + } + + return c.waitForStorageAttachmentToBeDeleted(input.Name, input.Timeout) +} + +// GetStorageAttachmentInput represents the body of an API request to obtain a Storage Attachment. +type GetStorageAttachmentInput struct { + // The three-part name of the Storage Attachment (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetStorageAttachment retrieves the storage attachment with the given name. +func (c *StorageAttachmentsClient) GetStorageAttachment(input *GetStorageAttachmentInput) (*StorageAttachmentInfo, error) { + var attachmentInfo *StorageAttachmentInfo + if err := c.getResource(input.Name, &attachmentInfo); err != nil { + return nil, err + } + + return c.success(attachmentInfo) +} + +// waitForStorageAttachmentToFullyAttach waits for the storage attachment with the given name to be fully attached, or times out. +func (c *StorageAttachmentsClient) waitForStorageAttachmentToFullyAttach(name string, timeout time.Duration) (*StorageAttachmentInfo, error) { + var waitResult *StorageAttachmentInfo + + err := c.client.WaitFor("storage attachment to be attached", timeout, func() (bool, error) { + input := &GetStorageAttachmentInput{ + Name: name, + } + info, err := c.GetStorageAttachment(input) + if err != nil { + return false, err + } + + if info != nil { + if info.State == Attached { + waitResult = info + return true, nil + } + } + + return false, nil + }) + + return waitResult, err +} + +// waitForStorageAttachmentToBeDeleted waits for the storage attachment with the given name to be fully deleted, or times out. 
+func (c *StorageAttachmentsClient) waitForStorageAttachmentToBeDeleted(name string, timeout time.Duration) error { + return c.client.WaitFor("storage attachment to be deleted", timeout, func() (bool, error) { + input := &GetStorageAttachmentInput{ + Name: name, + } + _, err := c.GetStorageAttachment(input) + if err != nil { + if client.WasNotFoundError(err) { + return true, nil + } + return false, err + } + return false, nil + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go new file mode 100644 index 000000000..aa7c4a7a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go @@ -0,0 +1,251 @@ +package compute + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-oracle-terraform/client" +) + +const ( + StorageVolumeSnapshotDescription = "storage volume snapshot" + StorageVolumeSnapshotContainerPath = "/storage/snapshot/" + StorageVolumeSnapshotResourcePath = "/storage/snapshot" + + WaitForSnapshotCreateTimeout = time.Duration(2400 * time.Second) + WaitForSnapshotDeleteTimeout = time.Duration(1500 * time.Second) + + // Collocated Snapshot Property + SnapshotPropertyCollocated = "/oracle/private/storage/snapshot/collocated" +) + +// StorageVolumeSnapshotClient is a client for the Storage Volume Snapshot functions of the Compute API. 
+type StorageVolumeSnapshotClient struct { + ResourceClient +} + +func (c *ComputeClient) StorageVolumeSnapshots() *StorageVolumeSnapshotClient { + return &StorageVolumeSnapshotClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: StorageVolumeSnapshotDescription, + ContainerPath: StorageVolumeSnapshotContainerPath, + ResourceRootPath: StorageVolumeSnapshotResourcePath, + }, + } +} + +// StorageVolumeSnapshotInfo represents the information retrieved from the service about a storage volume snapshot +type StorageVolumeSnapshotInfo struct { + // Account to use for snapshots + Account string `json:"account"` + + // Description of the snapshot + Description string `json:"description"` + + // The name of the machine image that's used in the boot volume from which this snapshot is taken + MachineImageName string `json:"machineimage_name"` + + // Name of the snapshot + Name string `json:"name"` + + // String indicating whether the parent volume is bootable or not + ParentVolumeBootable string `json:"parent_volume_bootable"` + + // Platform the snapshot is compatible with + Platform string `json:"platform"` + + // String determining whether the snapshot is remote or collocated + Property string `json:"property"` + + // The size of the snapshot in GB + Size string `json:"size"` + + // The ID of the snapshot. Generated by the server + SnapshotID string `json:"snapshot_id"` + + // The timestamp of the storage snapshot + SnapshotTimestamp string `json:"snapshot_timestamp"` + + // Timestamp for when the operation started + StartTimestamp string `json:"start_timestamp"` + + // Status of the snapshot + Status string `json:"status"` + + // Status Detail of the storage snapshot + StatusDetail string `json:"status_detail"` + + // Indicates the time that the current view of the storage volume snapshot was generated. 
+ StatusTimestamp string `json:"status_timestamp"` + + // Array of tags for the snapshot + Tags []string `json:"tags,omitempty"` + + // Uniform Resource Identifier + URI string `json:"uri"` + + // Name of the parent storage volume for the snapshot + Volume string `json:"volume"` +} + +// CreateStorageVolumeSnapshotInput represents the body of an API request to create a new storage volume snapshot +type CreateStorageVolumeSnapshotInput struct { + // Description of the snapshot + // Optional + Description string `json:"description,omitempty"` + + // Name of the snapshot + // Optional, will be generated if not specified + Name string `json:"name,omitempty"` + + // Whether or not the parent volume is bootable + // Optional + ParentVolumeBootable string `json:"parent_volume_bootable,omitempty"` + + // Whether collocated or remote + // Optional, will be remote if unspecified + Property string `json:"property,omitempty"` + + // Array of tags for the snapshot + // Optional + Tags []string `json:"tags,omitempty"` + + // Name of the volume to create the snapshot from + // Required + Volume string `json:"volume"` + + // Timeout to wait for snapshot to be completed. Will use default if unspecified + Timeout time.Duration `json:"-"` +} + +// CreateStorageVolumeSnapshot creates a snapshot based on the supplied information struct +func (c *StorageVolumeSnapshotClient) CreateStorageVolumeSnapshot(input *CreateStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + if input.Name != "" { + input.Name = c.getQualifiedName(input.Name) + } + input.Volume = c.getQualifiedName(input.Volume) + + var storageSnapshotInfo StorageVolumeSnapshotInfo + if err := c.createResource(&input, &storageSnapshotInfo); err != nil { + return nil, err + } + + if input.Timeout == 0 { + input.Timeout = WaitForSnapshotCreateTimeout + } + + // The name of the snapshot could have been generated. 
Use the response name as input + return c.waitForStorageSnapshotAvailable(storageSnapshotInfo.Name, input.Timeout) +} + +// GetStorageVolumeSnapshotInput represents the body of an API request to get information on a storage volume snapshot +type GetStorageVolumeSnapshotInput struct { + // Name of the snapshot + Name string `json:"name"` +} + +// GetStorageVolumeSnapshot makes an API request to populate information on a storage volume snapshot +func (c *StorageVolumeSnapshotClient) GetStorageVolumeSnapshot(input *GetStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + var storageSnapshot StorageVolumeSnapshotInfo + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &storageSnapshot); err != nil { + if client.WasNotFoundError(err) { + return nil, nil + } + + return nil, err + } + return c.success(&storageSnapshot) +} + +// DeleteStorageVolumeSnapshotInput represents the body of an API request to delete a storage volume snapshot +type DeleteStorageVolumeSnapshotInput struct { + // Name of the snapshot to delete + Name string `json:"name"` + + // Timeout to wait for deletion, will use default if unspecified + Timeout time.Duration `json:"-"` +} + +// DeleteStoragevolumeSnapshot makes an API request to delete a storage volume snapshot +func (c *StorageVolumeSnapshotClient) DeleteStorageVolumeSnapshot(input *DeleteStorageVolumeSnapshotInput) error { + input.Name = c.getQualifiedName(input.Name) + + if err := c.deleteResource(input.Name); err != nil { + return err + } + + if input.Timeout == 0 { + input.Timeout = WaitForSnapshotDeleteTimeout + } + + return c.waitForStorageSnapshotDeleted(input.Name, input.Timeout) +} + +func (c *StorageVolumeSnapshotClient) success(result *StorageVolumeSnapshotInfo) (*StorageVolumeSnapshotInfo, error) { + c.unqualify(&result.Name) + c.unqualify(&result.Volume) + + sizeInGigaBytes, err := sizeInGigaBytes(result.Size) + if err != nil { + return nil, err + } + result.Size = sizeInGigaBytes + + 
return result, nil +} + +// Waits for a storage snapshot to become available +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotAvailable(name string, timeout time.Duration) (*StorageVolumeSnapshotInfo, error) { + var result *StorageVolumeSnapshotInfo + + err := c.client.WaitFor( + fmt.Sprintf("storage volume snapshot %s to become available", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if err != nil { + return false, err + } + + if res != nil { + result = res + if strings.ToLower(result.Status) == "completed" { + return true, nil + } else if strings.ToLower(result.Status) == "error" { + return false, fmt.Errorf("Snapshot '%s' failed to create successfully. Status: %s Status Detail: %s", result.Name, result.Status, result.StatusDetail) + } + } + + return false, nil + }) + + return result, err +} + +// Waits for a storage snapshot to be deleted +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotDeleted(name string, timeout time.Duration) error { + return c.client.WaitFor( + fmt.Sprintf("storage volume snapshot %s to be deleted", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if res == nil { + return true, nil + } + + if err != nil { + return false, err + } + + return res == nil, nil + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go new file mode 100644 index 000000000..c479638de --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go @@ -0,0 +1,389 @@ +package compute + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-oracle-terraform/client" +) + +const WaitForVolumeReadyTimeout = time.Duration(600 * time.Second) 
+const WaitForVolumeDeleteTimeout = time.Duration(600 * time.Second) + +// StorageVolumeClient is a client for the Storage Volume functions of the Compute API. +type StorageVolumeClient struct { + ResourceClient +} + +// StorageVolumes obtains a StorageVolumeClient which can be used to access to the +// Storage Volume functions of the Compute API +func (c *ComputeClient) StorageVolumes() *StorageVolumeClient { + return &StorageVolumeClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "storage volume", + ContainerPath: "/storage/volume/", + ResourceRootPath: "/storage/volume", + }} + +} + +type StorageVolumeKind string + +const ( + StorageVolumeKindDefault StorageVolumeKind = "/oracle/public/storage/default" + StorageVolumeKindLatency StorageVolumeKind = "/oracle/public/storage/latency" + StorageVolumeKindSSD StorageVolumeKind = "/oracle/public/storage/ssd/gpl" +) + +// StorageVolumeInfo represents information retrieved from the service about a Storage Volume. +type StorageVolumeInfo struct { + // Shows the default account for your identity domain. + Account string `json:"account,omitempty"` + + // true indicates that the storage volume can also be used as a boot disk for an instance. + // If you set the value to true, then you must specify values for the `ImageList` and `ImageListEntry` fields. + Bootable bool `json:"bootable,omitempty"` + + // The description of the storage volume. + Description string `json:"description,omitempty"` + + // The hypervisor that this volume is compatible with. + Hypervisor string `json:"hypervisor,omitempty"` + + // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes. + ImageList string `json:"imagelist,omitempty"` + + // Specific imagelist entry version to extract. + ImageListEntry int `json:"imagelist_entry,omitempty"` + + // Three-part name of the machine image. 
This information is available if the volume is a bootable storage volume. + MachineImage string `json:"machineimage_name,omitempty"` + + // All volumes are managed volumes. Default value is true. + Managed bool `json:"managed,omitempty"` + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The OS platform this volume is compatible with. + Platform string `json:"platform,omitempty"` + + // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default. + Properties []string `json:"properties,omitempty"` + + // Boolean field indicating whether this volume can be attached as readonly. If set to False the volume will be attached as read-write. + ReadOnly bool `json:"readonly,omitempty"` + + // The size of this storage volume in GB. + Size string `json:"size"` + + // Name of the parent snapshot from which the storage volume is restored or cloned. + Snapshot string `json:"snapshot,omitempty"` + + // Account of the parent snapshot from which the storage volume is restored. + SnapshotAccount string `json:"snapshot_account,omitempty"` + + // Id of the parent snapshot from which the storage volume is restored or cloned. + SnapshotID string `json:"snapshot_id,omitempty"` + + // TODO: this should become a Constant, if/when we have the values + // The current state of the storage volume. + Status string `json:"status,omitempty"` + + // Details about the latest state of the storage volume. + StatusDetail string `json:"status_detail,omitempty"` + + // It indicates the time that the current view of the storage volume was generated. + StatusTimestamp string `json:"status_timestamp,omitempty"` + + // The storage pool from which this volume is allocated. + StoragePool string `json:"storage_pool,omitempty"` + + // Comma-separated strings that tag the storage volume. 
+ Tags []string `json:"tags,omitempty"` + + // Uniform Resource Identifier + URI string `json:"uri,omitempty"` +} + +func (c *StorageVolumeClient) getStorageVolumePath(name string) string { + return c.getObjectPath("/storage/volume", name) +} + +// CreateStorageVolumeInput represents the body of an API request to create a new Storage Volume. +type CreateStorageVolumeInput struct { + // true indicates that the storage volume can also be used as a boot disk for an instance. + // If you set the value to true, then you must specify values for the `ImageList` and `ImageListEntry` fields. + Bootable bool `json:"bootable,omitempty"` + + // The description of the storage volume. + Description string `json:"description,omitempty"` + + // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes. + ImageList string `json:"imagelist,omitempty"` + + // Specific imagelist entry version to extract. + ImageListEntry int `json:"imagelist_entry,omitempty"` + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default. + Properties []string `json:"properties,omitempty"` + + // The size of this storage volume in GB. + Size string `json:"size"` + + // Name of the parent snapshot from which the storage volume is restored or cloned. + Snapshot string `json:"snapshot,omitempty"` + + // Account of the parent snapshot from which the storage volume is restored. + SnapshotAccount string `json:"snapshot_account,omitempty"` + + // Id of the parent snapshot from which the storage volume is restored or cloned. + SnapshotID string `json:"snapshot_id,omitempty"` + + // Comma-separated strings that tag the storage volume. + Tags []string `json:"tags,omitempty"` + + // Timeout to wait for storage volume creation. 
+ Timeout time.Duration `json:"-"` +} + +// CreateStorageVolume uses the given CreateStorageVolumeInput to create a new Storage Volume. +func (c *StorageVolumeClient) CreateStorageVolume(input *CreateStorageVolumeInput) (*StorageVolumeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.ImageList = c.getQualifiedName(input.ImageList) + + sizeInBytes, err := sizeInBytes(input.Size) + if err != nil { + return nil, err + } + input.Size = sizeInBytes + + var storageInfo StorageVolumeInfo + if err := c.createResource(&input, &storageInfo); err != nil { + return nil, err + } + + // Should never be nil, as we set this in the provider; but protect against it anyways. + if input.Timeout == 0 { + input.Timeout = WaitForVolumeReadyTimeout + } + + volume, err := c.waitForStorageVolumeToBecomeAvailable(input.Name, input.Timeout) + if err != nil { + if volume != nil { + deleteInput := &DeleteStorageVolumeInput{ + Name: volume.Name, + } + + if err := c.DeleteStorageVolume(deleteInput); err != nil { + return nil, err + } + } + } + return volume, err +} + +// DeleteStorageVolumeInput represents the body of an API request to delete a Storage Volume. +type DeleteStorageVolumeInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // Timeout to wait for storage volume deletion + Timeout time.Duration `json:"-"` +} + +// DeleteStorageVolume deletes the specified storage volume. +func (c *StorageVolumeClient) DeleteStorageVolume(input *DeleteStorageVolumeInput) error { + if err := c.deleteResource(input.Name); err != nil { + return err + } + + // Should never be nil, but protect against it anyways + if input.Timeout == 0 { + input.Timeout = WaitForVolumeDeleteTimeout + } + + return c.waitForStorageVolumeToBeDeleted(input.Name, input.Timeout) +} + +// GetStorageVolumeInput represents the body of an API request to obtain a Storage Volume. 
+type GetStorageVolumeInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` +} + +func (c *StorageVolumeClient) success(result *StorageVolumeInfo) (*StorageVolumeInfo, error) { + c.unqualify(&result.ImageList) + c.unqualify(&result.Name) + c.unqualify(&result.Snapshot) + + sizeInMegaBytes, err := sizeInGigaBytes(result.Size) + if err != nil { + return nil, err + } + result.Size = sizeInMegaBytes + + return result, nil +} + +// GetStorageVolume gets Storage Volume information for the specified storage volume. +func (c *StorageVolumeClient) GetStorageVolume(input *GetStorageVolumeInput) (*StorageVolumeInfo, error) { + var storageVolume StorageVolumeInfo + if err := c.getResource(input.Name, &storageVolume); err != nil { + if client.WasNotFoundError(err) { + return nil, nil + } + + return nil, err + } + + return c.success(&storageVolume) +} + +// UpdateStorageVolumeInput represents the body of an API request to update a Storage Volume. +type UpdateStorageVolumeInput struct { + // The description of the storage volume. + Description string `json:"description,omitempty"` + + // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes. + ImageList string `json:"imagelist,omitempty"` + + // Specific imagelist entry version to extract. + ImageListEntry int `json:"imagelist_entry,omitempty"` + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default. + Properties []string `json:"properties,omitempty"` + + // The size of this storage volume in GB. + Size string `json:"size"` + + // Name of the parent snapshot from which the storage volume is restored or cloned. + Snapshot string `json:"snapshot,omitempty"` + + // Account of the parent snapshot from which the storage volume is restored. 
+ SnapshotAccount string `json:"snapshot_account,omitempty"` + + // Id of the parent snapshot from which the storage volume is restored or cloned. + SnapshotID string `json:"snapshot_id,omitempty"` + + // Comma-separated strings that tag the storage volume. + Tags []string `json:"tags,omitempty"` + + // Time to wait for storage volume ready + Timeout time.Duration `json:"-"` +} + +// UpdateStorageVolume updates the specified storage volume, optionally modifying size, description and tags. +func (c *StorageVolumeClient) UpdateStorageVolume(input *UpdateStorageVolumeInput) (*StorageVolumeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.ImageList = c.getQualifiedName(input.ImageList) + + sizeInBytes, err := sizeInBytes(input.Size) + if err != nil { + return nil, err + } + input.Size = sizeInBytes + + path := c.getStorageVolumePath(input.Name) + _, err = c.executeRequest("PUT", path, input) + if err != nil { + return nil, err + } + + if input.Timeout == 0 { + input.Timeout = WaitForVolumeReadyTimeout + } + + volumeInfo, err := c.waitForStorageVolumeToBecomeAvailable(input.Name, input.Timeout) + if err != nil { + return nil, err + } + + return volumeInfo, nil +} + +// waitForStorageVolumeToBecomeAvailable waits until a new Storage Volume is available (i.e. has finished initialising or updating). 
+func (c *StorageVolumeClient) waitForStorageVolumeToBecomeAvailable(name string, timeout time.Duration) (*StorageVolumeInfo, error) { + var waitResult *StorageVolumeInfo + + err := c.client.WaitFor( + fmt.Sprintf("storage volume %s to become available", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + getRequest := &GetStorageVolumeInput{ + Name: name, + } + result, err := c.GetStorageVolume(getRequest) + + if err != nil { + return false, err + } + + if result != nil { + waitResult = result + if strings.ToLower(waitResult.Status) == "online" { + return true, nil + } + if strings.ToLower(waitResult.Status) == "error" { + return false, fmt.Errorf("Error Creating Storage Volume: %s", waitResult.StatusDetail) + } + } + + return false, nil + }) + + return waitResult, err +} + +// waitForStorageVolumeToBeDeleted waits until the specified storage volume has been deleted. +func (c *StorageVolumeClient) waitForStorageVolumeToBeDeleted(name string, timeout time.Duration) error { + return c.client.WaitFor( + fmt.Sprintf("storage volume %s to be deleted", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + getRequest := &GetStorageVolumeInput{ + Name: name, + } + result, err := c.GetStorageVolume(getRequest) + if result == nil { + return true, nil + } + + if err != nil { + return false, err + } + + return result == nil, nil + }) +} + +func sizeInGigaBytes(input string) (string, error) { + sizeInBytes, err := strconv.Atoi(input) + if err != nil { + return "", err + } + sizeInKB := sizeInBytes / 1024 + sizeInMB := sizeInKB / 1024 + sizeInGb := sizeInMB / 1024 + return strconv.Itoa(sizeInGb), nil +} + +func sizeInBytes(input string) (string, error) { + sizeInGB, err := strconv.Atoi(input) + if err != nil { + return "", err + } + sizeInMB := sizeInGB * 1024 + sizeInKB := sizeInMB * 1024 + sizeInBytes := sizeInKB * 1024 + return strconv.Itoa(sizeInBytes), nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go 
b/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go new file mode 100644 index 000000000..5667e097e --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go @@ -0,0 +1,119 @@ +package compute + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + "time" + + "github.com/hashicorp/go-oracle-terraform/opc" +) + +const ( + _ClientTestUser = "test-user" + _ClientTestDomain = "test-domain" +) + +func newAuthenticatingServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if os.Getenv("ORACLE_LOG") != "" { + log.Printf("[DEBUG] Received request: %s, %s\n", r.Method, r.URL) + } + + if r.URL.Path == "/authenticate/" { + http.SetCookie(w, &http.Cookie{Name: "testAuthCookie", Value: "cookie value"}) + // w.WriteHeader(200) + } else { + handler(w, r) + } + })) +} + +func getTestClient(c *opc.Config) (*ComputeClient, error) { + // Build up config with default values if omitted + if c.APIEndpoint == nil { + if os.Getenv("OPC_ENDPOINT") == "" { + panic("OPC_ENDPOINT not set in environment") + } + endpoint, err := url.Parse(os.Getenv("OPC_ENDPOINT")) + if err != nil { + return nil, err + } + c.APIEndpoint = endpoint + } + + if c.IdentityDomain == nil { + domain := os.Getenv("OPC_IDENTITY_DOMAIN") + c.IdentityDomain = &domain + } + + if c.Username == nil { + username := os.Getenv("OPC_USERNAME") + c.Username = &username + } + + if c.Password == nil { + password := os.Getenv("OPC_PASSWORD") + c.Password = &password + } + + if c.HTTPClient == nil { + c.HTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 120 * time.Second}, + } + } + + return NewComputeClient(c) +} + +func getBlankTestClient() (*ComputeClient, *httptest.Server, error) { + server := newAuthenticatingServer(func(w 
http.ResponseWriter, r *http.Request) { + }) + + endpoint, err := url.Parse(server.URL) + if err != nil { + server.Close() + return nil, nil, err + } + + client, err := getTestClient(&opc.Config{ + IdentityDomain: opc.String(_ClientTestDomain), + Username: opc.String(_ClientTestUser), + APIEndpoint: endpoint, + }) + if err != nil { + server.Close() + return nil, nil, err + } + return client, server, nil +} + +// Returns a stub client with default values, and a custom API Endpoint +func getStubClient(endpoint *url.URL) (*ComputeClient, error) { + domain := "test" + username := "test" + password := "test" + config := &opc.Config{ + IdentityDomain: &domain, + Username: &username, + Password: &password, + APIEndpoint: endpoint, + } + return getTestClient(config) +} + +func unmarshalRequestBody(t *testing.T, r *http.Request, target interface{}) { + buf := new(bytes.Buffer) + buf.ReadFrom(r.Body) + err := json.Unmarshal(buf.Bytes(), target) + if err != nil { + t.Fatalf("Error marshalling request: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go new file mode 100644 index 000000000..44509d7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go @@ -0,0 +1,52 @@ +package compute + +type VirtNICsClient struct { + ResourceClient +} + +func (c *ComputeClient) VirtNICs() *VirtNICsClient { + return &VirtNICsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "Virtual NIC", + ContainerPath: "/network/v1/vnic/", + ResourceRootPath: "/network/v1/vnic", + }, + } +} + +type VirtualNIC struct { + // Description of the object. + Description string `json:"description"` + // MAC address of this VNIC. + MACAddress string `json:"macAddress"` + // The three-part name (/Compute-identity_domain/user/object) of the Virtual NIC. + Name string `json:"name"` + // Tags associated with the object. 
+ Tags []string `json:"tags"` + // True if the VNIC is of type "transit". + TransitFlag bool `json:"transitFlag"` + // Uniform Resource Identifier + Uri string `json:"uri"` +} + +// Can only GET a virtual NIC, not update, create, or delete +type GetVirtualNICInput struct { + // The three-part name (/Compute-identity_domain/user/object) of the Virtual NIC. + // Required + Name string `json:"name"` +} + +func (c *VirtNICsClient) GetVirtualNIC(input *GetVirtualNICInput) (*VirtualNIC, error) { + var virtNIC VirtualNIC + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &virtNIC); err != nil { + return nil, err + } + return c.success(&virtNIC) +} + +func (c *VirtNICsClient) success(info *VirtualNIC) (*VirtualNIC, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go new file mode 100644 index 000000000..85762e52f --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go @@ -0,0 +1,154 @@ +package compute + +type VirtNICSetsClient struct { + ResourceClient +} + +func (c *ComputeClient) VirtNICSets() *VirtNICSetsClient { + return &VirtNICSetsClient{ + ResourceClient: ResourceClient{ + ComputeClient: c, + ResourceDescription: "Virtual NIC Set", + ContainerPath: "/network/v1/vnicset/", + ResourceRootPath: "/network/v1/vnicset", + }, + } +} + +// Describes an existing virtual nic set +type VirtualNICSet struct { + // List of ACLs applied to the VNICs in the set. + AppliedACLs []string `json:"appliedAcls"` + // Description of the VNIC Set. + Description string `json:"description"` + // Name of the VNIC set. + Name string `json:"name"` + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. 
+ Tags []string `json:"tags"` + // Uniform Resource Identifier + Uri string `json:"uri"` + // List of VNICs associated with this VNIC set. + VirtualNICs []string `json:"vnics"` +} + +type CreateVirtualNICSetInput struct { + // List of ACLs applied to the VNICs in the set. + // Optional + AppliedACLs []string `json:"appliedAcls"` + // Description of the object. + // Optional + Description string `json:"description"` + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Object names can contain only alphanumeric, underscore (_), dash (-), and period (.) characters. Object names are case-sensitive. + // Required + Name string `json:"name"` + // Tags associated with this VNIC set. + // Optional + Tags []string `json:"tags"` + // List of VNICs associated with this VNIC set. + // Optional + VirtualNICs []string `json:"vnics"` +} + +func (c *VirtNICSetsClient) CreateVirtualNICSet(input *CreateVirtualNICSetInput) (*VirtualNICSet, error) { + input.Name = c.getQualifiedName(input.Name) + input.AppliedACLs = c.getQualifiedAcls(input.AppliedACLs) + qualifiedNics := c.getQualifiedList(input.VirtualNICs) + if len(qualifiedNics) != 0 { + input.VirtualNICs = qualifiedNics + } + + var virtNicSet VirtualNICSet + if err := c.createResource(input, &virtNicSet); err != nil { + return nil, err + } + + return c.success(&virtNicSet) +} + +type GetVirtualNICSetInput struct { + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Required + Name string `json:"name"` +} + +func (c *VirtNICSetsClient) GetVirtualNICSet(input *GetVirtualNICSetInput) (*VirtualNICSet, error) { + var virtNicSet VirtualNICSet + // Qualify Name + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &virtNicSet); err != nil { + return nil, err + } + + return c.success(&virtNicSet) +} + +type UpdateVirtualNICSetInput struct { + // List of ACLs applied to the VNICs in the set. 
+ // Optional + AppliedACLs []string `json:"appliedAcls"` + // Description of the object. + // Optional + Description string `json:"description"` + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Object names can contain only alphanumeric, underscore (_), dash (-), and period (.) characters. Object names are case-sensitive. + // Required + Name string `json:"name"` + // Tags associated with this VNIC set. + // Optional + Tags []string `json:"tags"` + // List of VNICs associated with this VNIC set. + // Optional + VirtualNICs []string `json:"vnics"` +} + +func (c *VirtNICSetsClient) UpdateVirtualNICSet(input *UpdateVirtualNICSetInput) (*VirtualNICSet, error) { + input.Name = c.getQualifiedName(input.Name) + input.AppliedACLs = c.getQualifiedAcls(input.AppliedACLs) + // Qualify VirtualNICs + qualifiedVNICs := c.getQualifiedList(input.VirtualNICs) + if len(qualifiedVNICs) != 0 { + input.VirtualNICs = qualifiedVNICs + } + + var virtNICSet VirtualNICSet + if err := c.updateResource(input.Name, input, &virtNICSet); err != nil { + return nil, err + } + + return c.success(&virtNICSet) +} + +type DeleteVirtualNICSetInput struct { + // The name of the virtual NIC set. 
+ // Required + Name string `json:"name"` +} + +func (c *VirtNICSetsClient) DeleteVirtualNICSet(input *DeleteVirtualNICSetInput) error { + input.Name = c.getQualifiedName(input.Name) + return c.deleteResource(input.Name) +} + +func (c *VirtNICSetsClient) getQualifiedAcls(acls []string) []string { + qualifiedAcls := []string{} + for _, acl := range acls { + qualifiedAcls = append(qualifiedAcls, c.getQualifiedName(acl)) + } + return qualifiedAcls +} + +func (c *VirtNICSetsClient) unqualifyAcls(acls []string) []string { + unqualifiedAcls := []string{} + for _, acl := range acls { + unqualifiedAcls = append(unqualifiedAcls, c.getUnqualifiedName(acl)) + } + return unqualifiedAcls +} + +func (c *VirtNICSetsClient) success(info *VirtualNICSet) (*VirtualNICSet, error) { + c.unqualify(&info.Name) + info.AppliedACLs = c.unqualifyAcls(info.AppliedACLs) + info.VirtualNICs = c.getUnqualifiedList(info.VirtualNICs) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go new file mode 100644 index 000000000..5c144a66d --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go @@ -0,0 +1,22 @@ +package opc + +import ( + "net/http" + "net/url" +) + +type Config struct { + Username *string + Password *string + IdentityDomain *string + APIEndpoint *url.URL + MaxRetries *int + LogLevel LogLevelType + Logger Logger + HTTPClient *http.Client + UserAgent *string +} + +func NewConfig() *Config { + return &Config{} +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go new file mode 100644 index 000000000..52fa08902 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go @@ -0,0 +1,9 @@ +package opc + +func String(v string) *string { + return &v +} + +func Int(v int) *int { + return &v +} diff --git 
a/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go new file mode 100644 index 000000000..6b12c10d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go @@ -0,0 +1,12 @@ +package opc + +import "fmt" + +type OracleError struct { + StatusCode int + Message string +} + +func (e OracleError) Error() string { + return fmt.Sprintf("%d: %s", e.StatusCode, e.Message) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go new file mode 100644 index 000000000..f9714a7a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go @@ -0,0 +1,70 @@ +package opc + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +const ( + LogOff LogLevelType = 0 + LogDebug LogLevelType = 1 +) + +type LogLevelType uint + +// Logger interface. Should be satisfied by Terraform's logger as well as the Default logger +type Logger interface { + Log(...interface{}) +} + +type LoggerFunc func(...interface{}) + +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// Returns a default logger if one isn't specified during configuration +func NewDefaultLogger() Logger { + logWriter, err := LogOutput() + if err != nil { + log.Fatalf("Error setting up log writer: %s", err) + } + return &defaultLogger{ + logger: log.New(logWriter, "", log.LstdFlags), + } +} + +// Default logger to satisfy the logger interface +type defaultLogger struct { + logger *log.Logger +} + +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) 
+} + +func LogOutput() (logOutput io.Writer, err error) { + // Default to nil + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == LogOff { + return + } + + // Logging is on, set output to STDERR + logOutput = os.Stderr + return +} + +// Gets current Log Level from the ORACLE_LOG env var +func LogLevel() LogLevelType { + envLevel := os.Getenv("ORACLE_LOG") + if envLevel == "" { + return LogOff + } else { + return LogDebug + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 2f8f8307d..fe36fe283 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -796,6 +796,24 @@ "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", "revisionTime": "2018-01-11T20:31:13Z" }, + { + "checksumSHA1": "hjQfXn32Tvuu6IJACOTsMzm+AbA=", + "path": "github.com/hashicorp/go-oracle-terraform/client", + "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", + "revisionTime": "2018-01-11T20:31:13Z" + }, + { + "checksumSHA1": "RhoE7zmHsn5zoXbx5AsAUUQI72E=", + "path": "github.com/hashicorp/go-oracle-terraform/compute", + "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", + "revisionTime": "2018-01-11T20:31:13Z" + }, + { + "checksumSHA1": "NuObCk0/ybL3w++EnltgrB1GQRc=", + "path": "github.com/hashicorp/go-oracle-terraform/opc", + "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", + "revisionTime": "2018-01-11T20:31:13Z" + }, { "checksumSHA1": "ErJHGU6AVPZM9yoY/xV11TwSjQs=", "path": "github.com/hashicorp/go-retryablehttp", From 75ee66f934aa5aff050628a22802c28979617c5a Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 12 Jan 2018 16:06:03 -0800 Subject: [PATCH 070/251] add stubbed out steps --- .../classic/step_create_IP_reservation.go | 24 +++++++++++++ .../oracle/classic/step_create_instance.go | 34 +++++++++++++++++++ builder/oracle/classic/step_instance_info.go | 24 +++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 builder/oracle/classic/step_create_IP_reservation.go create mode 100644 
builder/oracle/classic/step_create_instance.go create mode 100644 builder/oracle/classic/step_instance_info.go diff --git a/builder/oracle/classic/step_create_IP_reservation.go b/builder/oracle/classic/step_create_IP_reservation.go new file mode 100644 index 000000000..a719999dd --- /dev/null +++ b/builder/oracle/classic/step_create_IP_reservation.go @@ -0,0 +1,24 @@ +package classic + +import ( + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepCreateIPReservation struct{} + +func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + ui.Say("Creating IP reservation...") + const endpoint_path = "/ip/reservation/" // POST + + // $ opc compute ip-reservations add \ + // /Compute-mydomain/user@example.com/master-instance-ip \ + // /oracle/public/ippool + + // account /Compute-mydomain/default + // ip 129.144.27.172 + // name /Compute-mydomain/user@example.com/master-instance-ip + // ... + +} diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go new file mode 100644 index 000000000..6678979bf --- /dev/null +++ b/builder/oracle/classic/step_create_instance.go @@ -0,0 +1,34 @@ +package classic + +import ( + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepCreateIPReservation struct{} + +func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + ui.Say("Creating Instance...") + const endpoint_path = "/launchplan/" // POST + // master-instance.json + + // { + // "instances": [{ + // "shape": "oc3", + // "sshkeys": ["/Compute-mydomain/user@example.com/my_sshkey"], + // "name": "Compute-mydomain/user@example.com/master-instance", + // "label": "master-instance", + // "imagelist": "/Compute-mydomain/user@example.com/Ubuntu.16.04-LTS.amd64.20170330", + // "networking": { + // "eth0": { + // "nat": 
"ipreservation:/Compute-mydomain/user@example.com/master-instance-ip" + // } + // } + // }] + // } + // command line call + // $ opc compute launch-plans add --request-body=./master-instance.json + // ... + +} diff --git a/builder/oracle/classic/step_instance_info.go b/builder/oracle/classic/step_instance_info.go new file mode 100644 index 000000000..a2f935a99 --- /dev/null +++ b/builder/oracle/classic/step_instance_info.go @@ -0,0 +1,24 @@ +package classic + +import ( + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepCreateIPReservation struct{} + +func (s *stepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + ui.Say("Getting Instance Info...") + endpoint_path := "/instance/%s", instanceName // GET + + // $ opc compute ip-reservations add \ + // /Compute-mydomain/user@example.com/master-instance-ip \ + // /oracle/public/ippool + + // account /Compute-mydomain/default + // ip 129.144.27.172 + // name /Compute-mydomain/user@example.com/master-instance-ip + // ... 
+ +} From 4fe89be32ad368915570edd2e5cdc09b4feef406 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 12 Jan 2018 16:18:55 -0800 Subject: [PATCH 071/251] fleshing out steps --- .../oracle/classic/step_create_instance.go | 46 ++++++++++++------- builder/oracle/classic/step_instance_info.go | 16 ++----- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 6678979bf..4b9d33c0a 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -1,34 +1,48 @@ package classic import ( + "fmt" + "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) -type stepCreateIPReservation struct{} +type stepCreateInstance struct{} func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating Instance...") const endpoint_path = "/launchplan/" // POST // master-instance.json - - // { - // "instances": [{ - // "shape": "oc3", - // "sshkeys": ["/Compute-mydomain/user@example.com/my_sshkey"], - // "name": "Compute-mydomain/user@example.com/master-instance", - // "label": "master-instance", - // "imagelist": "/Compute-mydomain/user@example.com/Ubuntu.16.04-LTS.amd64.20170330", - // "networking": { - // "eth0": { - // "nat": "ipreservation:/Compute-mydomain/user@example.com/master-instance-ip" - // } - // } - // }] - // } + ` + { + "instances": [{ + "shape": "oc3", + "sshkeys": ["/Compute-mydomain/user@example.com/my_sshkey"], + "name": "Compute-mydomain/user@example.com/master-instance", + "label": "master-instance", + "imagelist": "/Compute-mydomain/user@example.com/Ubuntu.16.04-LTS.amd64.20170330", + "networking": { + "eth0": { + "nat": "ipreservation:/Compute-mydomain/user@example.com/master-instance-ip" + } + } + }] + } + ` // command line call // $ opc compute launch-plans add --request-body=./master-instance.json // ... 
+ instanceID, err := client.CreateInstance(publicKey) + if err != nil { + err = fmt.Errorf("Problem creating instance: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + + state.Put("instance_id", instanceID) + + ui.Say(fmt.Sprintf("Created instance (%s).", instanceID)) } diff --git a/builder/oracle/classic/step_instance_info.go b/builder/oracle/classic/step_instance_info.go index a2f935a99..9ec01cdbc 100644 --- a/builder/oracle/classic/step_instance_info.go +++ b/builder/oracle/classic/step_instance_info.go @@ -5,20 +5,14 @@ import ( "github.com/mitchellh/multistep" ) -type stepCreateIPReservation struct{} +type stepInstanceInfo struct{} func (s *stepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { - ui := state.Get("ui").(packer.Ui) ui.Say("Getting Instance Info...") - endpoint_path := "/instance/%s", instanceName // GET + ui := state.Get("ui").(packer.Ui) + instanceID := state.Get("instance_id").(string) + endpoint_path := "/instance/%s", instanceID // GET - // $ opc compute ip-reservations add \ - // /Compute-mydomain/user@example.com/master-instance-ip \ - // /oracle/public/ippool - - // account /Compute-mydomain/default - // ip 129.144.27.172 - // name /Compute-mydomain/user@example.com/master-instance-ip - // ... 
+ // https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/op-instance-%7Bname%7D-get.html } From a66dfe19721f31a55c7767911cc2ff8e0ae1d8ed Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 12 Jan 2018 16:48:35 -0800 Subject: [PATCH 072/251] fleshing out step_create_instance --- .../oracle/classic/step_create_instance.go | 57 +++++++++++++------ 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 4b9d33c0a..dce6b0d78 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -2,34 +2,55 @@ package classic import ( "fmt" + "text/template" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) +type instanceOptions struct { + Username string + IdentityDomain string + SshKey string + Shape string + ImageList string + InstanceIP string +} + +var instanceTemplate = template.Must(template.New("instanceRequestBody").Parse(` +{ + "instances": [{ + "shape": "{{.Shape}}", + "sshkeys": ["/Compute-{{.IdentityDomain}}/{{Username}}/{{.SshKey}}"], + "name": "Compute-{{.IdentityDomain}}/{{Username}}/packer-instance", + "label": "packer-instance", + "imagelist": "/Compute-{{.IdentityDomain}}/{{Username}}/{{.ImageList}}", + "networking": { + "eth0": { + "nat": "ipreservation:/Compute-{{.IdentityDomain}}/{{Username}}/{{.InstanceIP}}" + } + } + }] +} +`)) + type stepCreateInstance struct{} func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - ui.Say("Creating Instance...") + config := state.Get("config").(Config) const endpoint_path = "/launchplan/" // POST - // master-instance.json - ` - { - "instances": [{ - "shape": "oc3", - "sshkeys": ["/Compute-mydomain/user@example.com/my_sshkey"], - "name": "Compute-mydomain/user@example.com/master-instance", - "label": "master-instance", - "imagelist": 
"/Compute-mydomain/user@example.com/Ubuntu.16.04-LTS.amd64.20170330", - "networking": { - "eth0": { - "nat": "ipreservation:/Compute-mydomain/user@example.com/master-instance-ip" - } - } - }] - } - ` + + ui.Say("Creating Instance...") + + // generate launch plan definition for this instance + err = instanceTemplate.Execute(&buffer, instanceOptions{ + Username: config.Username, + IdentityDomain: config.IdentityDomain, + SshKey: config.SshKey, + Shape: config.Shape, + ImageList: config.ImageList, + }) // command line call // $ opc compute launch-plans add --request-body=./master-instance.json // ... From 0117f537211d1c8716e9515f6a49770ed5eca1f1 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 12 Jan 2018 16:51:59 -0800 Subject: [PATCH 073/251] add error message --- builder/oracle/classic/step_create_instance.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index dce6b0d78..e6fbb8798 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -51,6 +51,10 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc Shape: config.Shape, ImageList: config.ImageList, }) + if err != nil { + fmt.Printf("Error creating launch plan definition: %s", err) + return "", err + } // command line call // $ opc compute launch-plans add --request-body=./master-instance.json // ... 
From 7d72870179895a397acc3c5bff7ba355909ff8f1 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 16 Jan 2018 12:09:42 -0800 Subject: [PATCH 074/251] add buffer to read template into --- builder/oracle/classic/step_create_instance.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index e6fbb8798..e461dd5ab 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -1,6 +1,7 @@ package classic import ( + "bytes" "fmt" "text/template" @@ -44,6 +45,7 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc ui.Say("Creating Instance...") // generate launch plan definition for this instance + var buffer bytes.Buffer err = instanceTemplate.Execute(&buffer, instanceOptions{ Username: config.Username, IdentityDomain: config.IdentityDomain, @@ -55,10 +57,8 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc fmt.Printf("Error creating launch plan definition: %s", err) return "", err } - // command line call - // $ opc compute launch-plans add --request-body=./master-instance.json - // ... 
- + // for API docs see + // https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/op-launchplan--post.html instanceID, err := client.CreateInstance(publicKey) if err != nil { err = fmt.Errorf("Problem creating instance: %s", err) From 007e8f7c14203ef81b23e82f0ead74e4f185a187 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 16 Jan 2018 14:28:24 -0800 Subject: [PATCH 075/251] finish stubbing out step_create_IP_reservation --- builder/oracle/classic/builder.go | 3 +- .../classic/step_create_IP_reservation.go | 32 ++++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index b8af96345..4867ff34e 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/go-oracle-terraform/opc" "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) @@ -58,7 +59,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ - /* &ocommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), @@ -75,7 +75,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &common.StepProvision{}, &stepSnapshot{}, - */ } // Run the steps diff --git a/builder/oracle/classic/step_create_IP_reservation.go b/builder/oracle/classic/step_create_IP_reservation.go index a719999dd..415b3ddac 100644 --- a/builder/oracle/classic/step_create_IP_reservation.go +++ b/builder/oracle/classic/step_create_IP_reservation.go @@ -1,6 +1,9 @@ package classic import ( + "log" + + "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) @@ -10,15 +13,22 @@ 
type stepCreateIPReservation struct{} func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating IP reservation...") - const endpoint_path = "/ip/reservation/" // POST - - // $ opc compute ip-reservations add \ - // /Compute-mydomain/user@example.com/master-instance-ip \ - // /oracle/public/ippool - - // account /Compute-mydomain/default - // ip 129.144.27.172 - // name /Compute-mydomain/user@example.com/master-instance-ip - // ... - + client := state.Get("client", client).(*compute.ComputeClient) + iprClient := client.IPReservations() + if err != nil { + log.Printf("Error getting IPReservations Client: %s", err) + return multistep.ActionHalt + } + // TODO: add optional Name and Tags + IPInput := &iprClient.CreateIPReservationInput{ + ParentPool: compute.PublicReservationPool, + Permanent: true, + } + ipRes, err := iprClient.CreateIPReservation(createIPReservation) + if err != nil { + log.Printf("Error creating IP Reservation: %s", err) + return multistep.ActionHalt + } + log.Printf("debug: ipRes is %#v", ipRes) + return multistep.ActionContinue } From 8aa716cd4cb559dd0ec26a8f1a3be37ba3b7eaee Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 16 Jan 2018 16:55:39 -0800 Subject: [PATCH 076/251] stub out step_snapshot --- .../classic/step_create_IP_reservation.go | 1 + .../oracle/classic/step_create_instance.go | 70 ++++++------------- builder/oracle/classic/step_snapshot.go | 39 +++++++++++ 3 files changed, 61 insertions(+), 49 deletions(-) create mode 100644 builder/oracle/classic/step_snapshot.go diff --git a/builder/oracle/classic/step_create_IP_reservation.go b/builder/oracle/classic/step_create_IP_reservation.go index 415b3ddac..797063b1c 100644 --- a/builder/oracle/classic/step_create_IP_reservation.go +++ b/builder/oracle/classic/step_create_IP_reservation.go @@ -29,6 +29,7 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc log.Printf("Error 
creating IP Reservation: %s", err) return multistep.ActionHalt } + state.Put("instance_ip", ipRes.IP) log.Printf("debug: ipRes is %#v", ipRes) return multistep.ActionContinue } diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index e461dd5ab..f94d6d0b7 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -1,65 +1,37 @@ package classic import ( - "bytes" "fmt" - "text/template" + "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" ) -type instanceOptions struct { - Username string - IdentityDomain string - SshKey string - Shape string - ImageList string - InstanceIP string -} - -var instanceTemplate = template.Must(template.New("instanceRequestBody").Parse(` -{ - "instances": [{ - "shape": "{{.Shape}}", - "sshkeys": ["/Compute-{{.IdentityDomain}}/{{Username}}/{{.SshKey}}"], - "name": "Compute-{{.IdentityDomain}}/{{Username}}/packer-instance", - "label": "packer-instance", - "imagelist": "/Compute-{{.IdentityDomain}}/{{Username}}/{{.ImageList}}", - "networking": { - "eth0": { - "nat": "ipreservation:/Compute-{{.IdentityDomain}}/{{Username}}/{{.InstanceIP}}" - } - } - }] -} -`)) - type stepCreateInstance struct{} -func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { - ui := state.Get("ui").(packer.Ui) - config := state.Get("config").(Config) - const endpoint_path = "/launchplan/" // POST - +func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Creating Instance...") - // generate launch plan definition for this instance - var buffer bytes.Buffer - err = instanceTemplate.Execute(&buffer, instanceOptions{ - Username: config.Username, - IdentityDomain: config.IdentityDomain, - SshKey: config.SshKey, - Shape: config.Shape, - ImageList: config.ImageList, - }) - if err != nil { - fmt.Printf("Error creating launch 
plan definition: %s", err) - return "", err + // get variables from state + ui := state.Get("ui").(packer.Ui) + config := state.Get("config").(Config) + client := state.Get("client").(*compute.ComputeClient) + sshPublicKey := state.Get("publicKey").(string) + + // get instances client + instanceClient := client.Instances() + + // Instances Input + input := &compute.CreateInstanceInput{ + Name: config.ImageName, + Shape: config.Shape, + ImageList: config.ImageList, + SSHKeys: []string{}, + Attributes: map[string]interface{}{}, } - // for API docs see - // https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/op-launchplan--post.html - instanceID, err := client.CreateInstance(publicKey) + + instanceInfo, err := instanceClient.CreateInstance(input) if err != nil { err = fmt.Errorf("Problem creating instance: %s", err) ui.Error(err.Error()) @@ -67,7 +39,7 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc return multistep.ActionHalt } - state.Put("instance_id", instanceID) + state.Put("instance_id", instanceInfo.ID) ui.Say(fmt.Sprintf("Created instance (%s).", instanceID)) } diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go new file mode 100644 index 000000000..582966a83 --- /dev/null +++ b/builder/oracle/classic/step_snapshot.go @@ -0,0 +1,39 @@ +package classic + +import ( + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepSnapshot struct{} + +func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { + // get variables from state + ui := state.Get("ui").(packer.Ui) + ui.Say("Creating Snapshot...") + config := state.Get("config").(Config) + client := state.Get("client").(*compute.ComputeClient) + instanceID := state.Get("instance_id").(string) + + // get instances client + snapshotClient := client.SnapshotsClient() + + // Instances Input + createSnapshotInput := 
&CreateSnapshotInput{ + Instance: instanceID, + MachineImage: config.ImageName, + } + + snap, err := snapshotClient.CreateSnapshot(input) + if err != nil { + err = fmt.Errorf("Problem creating snapshot: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say(fmt.Sprintf("Created snapshot (%s).", snap.Name)) +} From 46c3113613355d7f9ba928014eaa1dc66d5c7014 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 17 Jan 2018 13:07:41 -0800 Subject: [PATCH 077/251] it compiles :) --- builder/oracle/classic/builder.go | 5 +- builder/oracle/classic/config.go | 1 + .../classic/step_create_IP_reservation.go | 14 +- .../oracle/classic/step_create_instance.go | 13 +- builder/oracle/classic/step_instance_info.go | 18 -- builder/oracle/classic/step_snapshot.go | 11 +- builder/oracle/oci/builder.go | 4 +- builder/oracle/oci/ssh.go | 45 ---- command/plugin.go | 198 +++++++++--------- 9 files changed, 131 insertions(+), 178 deletions(-) delete mode 100644 builder/oracle/classic/step_instance_info.go delete mode 100644 builder/oracle/oci/ssh.go diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 4867ff34e..4501456d2 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/go-oracle-terraform/opc" + ocommon "github.com/hashicorp/packer/builder/oracle/common" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/packer" @@ -68,8 +69,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &stepCreateInstance{}, &communicator.StepConnect{ Config: &b.config.Comm, - Host: commHost, - SSHConfig: SSHConfig( + Host: ocommon.CommHost, + SSHConfig: ocommon.SSHConfig( b.config.Comm.SSHUsername, b.config.Comm.SSHPassword), }, diff --git 
a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 050f78d91..10ec44c54 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -24,6 +24,7 @@ type Config struct { apiEndpointURL *url.URL // Image + ImageName string `mapstructure:"image_name"` Shape string `mapstructure:"shape"` ImageList string `json:"image_list"` diff --git a/builder/oracle/classic/step_create_IP_reservation.go b/builder/oracle/classic/step_create_IP_reservation.go index 797063b1c..f3f898eaa 100644 --- a/builder/oracle/classic/step_create_IP_reservation.go +++ b/builder/oracle/classic/step_create_IP_reservation.go @@ -13,18 +13,14 @@ type stepCreateIPReservation struct{} func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating IP reservation...") - client := state.Get("client", client).(*compute.ComputeClient) + client := state.Get("client").(*compute.ComputeClient) iprClient := client.IPReservations() - if err != nil { - log.Printf("Error getting IPReservations Client: %s", err) - return multistep.ActionHalt - } // TODO: add optional Name and Tags - IPInput := &iprClient.CreateIPReservationInput{ + IPInput := &compute.CreateIPReservationInput{ ParentPool: compute.PublicReservationPool, Permanent: true, } - ipRes, err := iprClient.CreateIPReservation(createIPReservation) + ipRes, err := iprClient.CreateIPReservation(IPInput) if err != nil { log.Printf("Error creating IP Reservation: %s", err) return multistep.ActionHalt @@ -33,3 +29,7 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc log.Printf("debug: ipRes is %#v", ipRes) return multistep.ActionContinue } + +func (s *stepCreateIPReservation) Cleanup(state multistep.StateBag) { + // Nothing to do +} diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index f94d6d0b7..7de599190 100644 --- 
a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -11,10 +11,9 @@ import ( type stepCreateInstance struct{} func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction { - ui.Say("Creating Instance...") - // get variables from state ui := state.Get("ui").(packer.Ui) + ui.Say("Creating Instance...") config := state.Get("config").(Config) client := state.Get("client").(*compute.ComputeClient) sshPublicKey := state.Get("publicKey").(string) @@ -27,7 +26,7 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction Name: config.ImageName, Shape: config.Shape, ImageList: config.ImageList, - SSHKeys: []string{}, + SSHKeys: []string{sshPublicKey}, Attributes: map[string]interface{}{}, } @@ -40,6 +39,10 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction } state.Put("instance_id", instanceInfo.ID) - - ui.Say(fmt.Sprintf("Created instance (%s).", instanceID)) + ui.Say(fmt.Sprintf("Created instance (%s).", instanceInfo.ID)) + return multistep.ActionContinue +} + +func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { + // Nothing to do } diff --git a/builder/oracle/classic/step_instance_info.go b/builder/oracle/classic/step_instance_info.go deleted file mode 100644 index 9ec01cdbc..000000000 --- a/builder/oracle/classic/step_instance_info.go +++ /dev/null @@ -1,18 +0,0 @@ -package classic - -import ( - "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" -) - -type stepInstanceInfo struct{} - -func (s *stepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { - ui.Say("Getting Instance Info...") - ui := state.Get("ui").(packer.Ui) - instanceID := state.Get("instance_id").(string) - endpoint_path := "/instance/%s", instanceID // GET - - // https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/op-instance-%7Bname%7D-get.html - -} diff --git a/builder/oracle/classic/step_snapshot.go 
b/builder/oracle/classic/step_snapshot.go index 582966a83..2df8976a1 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -19,15 +19,15 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { instanceID := state.Get("instance_id").(string) // get instances client - snapshotClient := client.SnapshotsClient() + snapshotClient := client.Snapshots() // Instances Input - createSnapshotInput := &CreateSnapshotInput{ + snapshotInput := &compute.CreateSnapshotInput{ Instance: instanceID, MachineImage: config.ImageName, } - snap, err := snapshotClient.CreateSnapshot(input) + snap, err := snapshotClient.CreateSnapshot(snapshotInput) if err != nil { err = fmt.Errorf("Problem creating snapshot: %s", err) ui.Error(err.Error()) @@ -36,4 +36,9 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } ui.Say(fmt.Sprintf("Created snapshot (%s).", snap.Name)) + return multistep.ActionContinue +} + +func (s *stepSnapshot) Cleanup(state multistep.StateBag) { + // Nothing to do } diff --git a/builder/oracle/oci/builder.go b/builder/oracle/oci/builder.go index 07d41b756..612fdf4f6 100644 --- a/builder/oracle/oci/builder.go +++ b/builder/oracle/oci/builder.go @@ -60,8 +60,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &stepInstanceInfo{}, &communicator.StepConnect{ Config: &b.config.Comm, - Host: commHost, - SSHConfig: SSHConfig( + Host: ocommon.CommHost, + SSHConfig: ocommon.SSHConfig( b.config.Comm.SSHUsername, b.config.Comm.SSHPassword), }, diff --git a/builder/oracle/oci/ssh.go b/builder/oracle/oci/ssh.go deleted file mode 100644 index 5424b94b4..000000000 --- a/builder/oracle/oci/ssh.go +++ /dev/null @@ -1,45 +0,0 @@ -package oci - -import ( - "fmt" - - packerssh "github.com/hashicorp/packer/communicator/ssh" - "github.com/hashicorp/packer/helper/multistep" - "golang.org/x/crypto/ssh" -) - -func commHost(state multistep.StateBag) (string, error) { - 
ipAddress := state.Get("instance_ip").(string) - return ipAddress, nil -} - -// SSHConfig returns a function that can be used for the SSH communicator -// config for connecting to the instance created over SSH using the private key -// or password. -func SSHConfig(username, password string) func(state multistep.StateBag) (*ssh.ClientConfig, error) { - return func(state multistep.StateBag) (*ssh.ClientConfig, error) { - privateKey, hasKey := state.GetOk("privateKey") - if hasKey { - - signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string))) - if err != nil { - return nil, fmt.Errorf("Error setting up SSH config: %s", err) - } - return &ssh.ClientConfig{ - User: username, - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - }, nil - - } - - return &ssh.ClientConfig{ - User: username, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - Auth: []ssh.AuthMethod{ - ssh.Password(password), - ssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(password)), - }, - }, nil - } -} diff --git a/command/plugin.go b/command/plugin.go index d9e7ea577..4f4228fc3 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -14,65 +14,67 @@ import ( "github.com/hashicorp/packer/packer/plugin" alicloudecsbuilder "github.com/hashicorp/packer/builder/alicloud/ecs" + alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import" amazonchrootbuilder "github.com/hashicorp/packer/builder/amazon/chroot" amazonebsbuilder "github.com/hashicorp/packer/builder/amazon/ebs" amazonebssurrogatebuilder "github.com/hashicorp/packer/builder/amazon/ebssurrogate" amazonebsvolumebuilder "github.com/hashicorp/packer/builder/amazon/ebsvolume" - amazoninstancebuilder "github.com/hashicorp/packer/builder/amazon/instance" - azurearmbuilder "github.com/hashicorp/packer/builder/azure/arm" - cloudstackbuilder "github.com/hashicorp/packer/builder/cloudstack" - digitaloceanbuilder "github.com/hashicorp/packer/builder/digitalocean" - 
dockerbuilder "github.com/hashicorp/packer/builder/docker" - filebuilder "github.com/hashicorp/packer/builder/file" - googlecomputebuilder "github.com/hashicorp/packer/builder/googlecompute" - hypervisobuilder "github.com/hashicorp/packer/builder/hyperv/iso" - hypervvmcxbuilder "github.com/hashicorp/packer/builder/hyperv/vmcx" - lxcbuilder "github.com/hashicorp/packer/builder/lxc" - lxdbuilder "github.com/hashicorp/packer/builder/lxd" - nullbuilder "github.com/hashicorp/packer/builder/null" - oneandonebuilder "github.com/hashicorp/packer/builder/oneandone" - openstackbuilder "github.com/hashicorp/packer/builder/openstack" - oracleocibuilder "github.com/hashicorp/packer/builder/oracle/oci" - parallelsisobuilder "github.com/hashicorp/packer/builder/parallels/iso" - parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" - profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" - qemubuilder "github.com/hashicorp/packer/builder/qemu" - tritonbuilder "github.com/hashicorp/packer/builder/triton" - virtualboxisobuilder "github.com/hashicorp/packer/builder/virtualbox/iso" - virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf" - vmwareisobuilder "github.com/hashicorp/packer/builder/vmware/iso" - vmwarevmxbuilder "github.com/hashicorp/packer/builder/vmware/vmx" - alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import" amazonimportpostprocessor "github.com/hashicorp/packer/post-processor/amazon-import" + amazoninstancebuilder "github.com/hashicorp/packer/builder/amazon/instance" + ansiblelocalprovisioner "github.com/hashicorp/packer/provisioner/ansible-local" + ansibleprovisioner "github.com/hashicorp/packer/provisioner/ansible" artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice" atlaspostprocessor "github.com/hashicorp/packer/post-processor/atlas" + azurearmbuilder "github.com/hashicorp/packer/builder/azure/arm" checksumpostprocessor 
"github.com/hashicorp/packer/post-processor/checksum" + chefclientprovisioner "github.com/hashicorp/packer/provisioner/chef-client" + chefsoloprovisioner "github.com/hashicorp/packer/provisioner/chef-solo" + cloudstackbuilder "github.com/hashicorp/packer/builder/cloudstack" compresspostprocessor "github.com/hashicorp/packer/post-processor/compress" + convergeprovisioner "github.com/hashicorp/packer/provisioner/converge" + digitaloceanbuilder "github.com/hashicorp/packer/builder/digitalocean" + dockerbuilder "github.com/hashicorp/packer/builder/docker" dockerimportpostprocessor "github.com/hashicorp/packer/post-processor/docker-import" dockerpushpostprocessor "github.com/hashicorp/packer/post-processor/docker-push" dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save" dockertagpostprocessor "github.com/hashicorp/packer/post-processor/docker-tag" - googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export" - manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest" - shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local" - vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant" - vagrantcloudpostprocessor "github.com/hashicorp/packer/post-processor/vagrant-cloud" - vspherepostprocessor "github.com/hashicorp/packer/post-processor/vsphere" - vspheretemplatepostprocessor "github.com/hashicorp/packer/post-processor/vsphere-template" - ansibleprovisioner "github.com/hashicorp/packer/provisioner/ansible" - ansiblelocalprovisioner "github.com/hashicorp/packer/provisioner/ansible-local" - chefclientprovisioner "github.com/hashicorp/packer/provisioner/chef-client" - chefsoloprovisioner "github.com/hashicorp/packer/provisioner/chef-solo" - convergeprovisioner "github.com/hashicorp/packer/provisioner/converge" + filebuilder "github.com/hashicorp/packer/builder/file" fileprovisioner "github.com/hashicorp/packer/provisioner/file" + googlecomputebuilder 
"github.com/hashicorp/packer/builder/googlecompute" + googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export" + hypervisobuilder "github.com/hashicorp/packer/builder/hyperv/iso" + hypervvmcxbuilder "github.com/hashicorp/packer/builder/hyperv/vmcx" + lxcbuilder "github.com/hashicorp/packer/builder/lxc" + lxdbuilder "github.com/hashicorp/packer/builder/lxd" + manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest" + nullbuilder "github.com/hashicorp/packer/builder/null" + oneandonebuilder "github.com/hashicorp/packer/builder/oneandone" + openstackbuilder "github.com/hashicorp/packer/builder/openstack" + oracleclassicbuilder "github.com/hashicorp/packer/builder/oracle/classic" + oracleocibuilder "github.com/hashicorp/packer/builder/oracle/oci" + parallelsisobuilder "github.com/hashicorp/packer/builder/parallels/iso" + parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" powershellprovisioner "github.com/hashicorp/packer/provisioner/powershell" + profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" puppetmasterlessprovisioner "github.com/hashicorp/packer/provisioner/puppet-masterless" puppetserverprovisioner "github.com/hashicorp/packer/provisioner/puppet-server" + qemubuilder "github.com/hashicorp/packer/builder/qemu" saltmasterlessprovisioner "github.com/hashicorp/packer/provisioner/salt-masterless" - shellprovisioner "github.com/hashicorp/packer/provisioner/shell" + shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local" shelllocalprovisioner "github.com/hashicorp/packer/provisioner/shell-local" + shellprovisioner "github.com/hashicorp/packer/provisioner/shell" + tritonbuilder "github.com/hashicorp/packer/builder/triton" + vagrantcloudpostprocessor "github.com/hashicorp/packer/post-processor/vagrant-cloud" + vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant" + virtualboxisobuilder 
"github.com/hashicorp/packer/builder/virtualbox/iso" + virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf" + vmwareisobuilder "github.com/hashicorp/packer/builder/vmware/iso" + vmwarevmxbuilder "github.com/hashicorp/packer/builder/vmware/vmx" + vspherepostprocessor "github.com/hashicorp/packer/post-processor/vsphere" + vspheretemplatepostprocessor "github.com/hashicorp/packer/post-processor/vsphere-template" windowsrestartprovisioner "github.com/hashicorp/packer/provisioner/windows-restart" windowsshellprovisioner "github.com/hashicorp/packer/provisioner/windows-shell" + ) type PluginCommand struct { @@ -80,74 +82,78 @@ type PluginCommand struct { } var Builders = map[string]packer.Builder{ - "alicloud-ecs": new(alicloudecsbuilder.Builder), - "amazon-chroot": new(amazonchrootbuilder.Builder), - "amazon-ebs": new(amazonebsbuilder.Builder), - "amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder), - "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder), - "amazon-instance": new(amazoninstancebuilder.Builder), - "azure-arm": new(azurearmbuilder.Builder), - "cloudstack": new(cloudstackbuilder.Builder), - "digitalocean": new(digitaloceanbuilder.Builder), - "docker": new(dockerbuilder.Builder), - "file": new(filebuilder.Builder), - "googlecompute": new(googlecomputebuilder.Builder), - "hyperv-iso": new(hypervisobuilder.Builder), - "hyperv-vmcx": new(hypervvmcxbuilder.Builder), - "lxc": new(lxcbuilder.Builder), - "lxd": new(lxdbuilder.Builder), - "null": new(nullbuilder.Builder), - "oneandone": new(oneandonebuilder.Builder), - "openstack": new(openstackbuilder.Builder), - "oracle-oci": new(oracleocibuilder.Builder), - "parallels-iso": new(parallelsisobuilder.Builder), - "parallels-pvm": new(parallelspvmbuilder.Builder), - "profitbricks": new(profitbricksbuilder.Builder), - "qemu": new(qemubuilder.Builder), - "triton": new(tritonbuilder.Builder), - "virtualbox-iso": new(virtualboxisobuilder.Builder), - "virtualbox-ovf": 
new(virtualboxovfbuilder.Builder), - "vmware-iso": new(vmwareisobuilder.Builder), - "vmware-vmx": new(vmwarevmxbuilder.Builder), + "alicloud-ecs": new(alicloudecsbuilder.Builder), + "amazon-chroot": new(amazonchrootbuilder.Builder), + "amazon-ebs": new(amazonebsbuilder.Builder), + "amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder), + "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder), + "amazon-instance": new(amazoninstancebuilder.Builder), + "azure-arm": new(azurearmbuilder.Builder), + "cloudstack": new(cloudstackbuilder.Builder), + "digitalocean": new(digitaloceanbuilder.Builder), + "docker": new(dockerbuilder.Builder), + "file": new(filebuilder.Builder), + "googlecompute": new(googlecomputebuilder.Builder), + "hyperv-iso": new(hypervisobuilder.Builder), + "hyperv-vmcx": new(hypervvmcxbuilder.Builder), + "lxc": new(lxcbuilder.Builder), + "lxd": new(lxdbuilder.Builder), + "null": new(nullbuilder.Builder), + "oneandone": new(oneandonebuilder.Builder), + "openstack": new(openstackbuilder.Builder), + "oracle-classic": new(oracleclassicbuilder.Builder), + "oracle-oci": new(oracleocibuilder.Builder), + "parallels-iso": new(parallelsisobuilder.Builder), + "parallels-pvm": new(parallelspvmbuilder.Builder), + "profitbricks": new(profitbricksbuilder.Builder), + "qemu": new(qemubuilder.Builder), + "triton": new(tritonbuilder.Builder), + "virtualbox-iso": new(virtualboxisobuilder.Builder), + "virtualbox-ovf": new(virtualboxovfbuilder.Builder), + "vmware-iso": new(vmwareisobuilder.Builder), + "vmware-vmx": new(vmwarevmxbuilder.Builder), } + var Provisioners = map[string]packer.Provisioner{ - "ansible": new(ansibleprovisioner.Provisioner), - "ansible-local": new(ansiblelocalprovisioner.Provisioner), - "chef-client": new(chefclientprovisioner.Provisioner), - "chef-solo": new(chefsoloprovisioner.Provisioner), - "converge": new(convergeprovisioner.Provisioner), - "file": new(fileprovisioner.Provisioner), - "powershell": new(powershellprovisioner.Provisioner), - 
"puppet-masterless": new(puppetmasterlessprovisioner.Provisioner), - "puppet-server": new(puppetserverprovisioner.Provisioner), + "ansible": new(ansibleprovisioner.Provisioner), + "ansible-local": new(ansiblelocalprovisioner.Provisioner), + "chef-client": new(chefclientprovisioner.Provisioner), + "chef-solo": new(chefsoloprovisioner.Provisioner), + "converge": new(convergeprovisioner.Provisioner), + "file": new(fileprovisioner.Provisioner), + "powershell": new(powershellprovisioner.Provisioner), + "puppet-masterless": new(puppetmasterlessprovisioner.Provisioner), + "puppet-server": new(puppetserverprovisioner.Provisioner), "salt-masterless": new(saltmasterlessprovisioner.Provisioner), - "shell": new(shellprovisioner.Provisioner), - "shell-local": new(shelllocalprovisioner.Provisioner), + "shell": new(shellprovisioner.Provisioner), + "shell-local": new(shelllocalprovisioner.Provisioner), "windows-restart": new(windowsrestartprovisioner.Provisioner), - "windows-shell": new(windowsshellprovisioner.Provisioner), + "windows-shell": new(windowsshellprovisioner.Provisioner), } + var PostProcessors = map[string]packer.PostProcessor{ - "alicloud-import": new(alicloudimportpostprocessor.PostProcessor), - "amazon-import": new(amazonimportpostprocessor.PostProcessor), - "artifice": new(artificepostprocessor.PostProcessor), - "atlas": new(atlaspostprocessor.PostProcessor), - "checksum": new(checksumpostprocessor.PostProcessor), - "compress": new(compresspostprocessor.PostProcessor), - "docker-import": new(dockerimportpostprocessor.PostProcessor), - "docker-push": new(dockerpushpostprocessor.PostProcessor), - "docker-save": new(dockersavepostprocessor.PostProcessor), - "docker-tag": new(dockertagpostprocessor.PostProcessor), - "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor), - "manifest": new(manifestpostprocessor.PostProcessor), - "shell-local": new(shelllocalpostprocessor.PostProcessor), - "vagrant": new(vagrantpostprocessor.PostProcessor), - 
"vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor), - "vsphere": new(vspherepostprocessor.PostProcessor), - "vsphere-template": new(vspheretemplatepostprocessor.PostProcessor), + "alicloud-import": new(alicloudimportpostprocessor.PostProcessor), + "amazon-import": new(amazonimportpostprocessor.PostProcessor), + "artifice": new(artificepostprocessor.PostProcessor), + "atlas": new(atlaspostprocessor.PostProcessor), + "checksum": new(checksumpostprocessor.PostProcessor), + "compress": new(compresspostprocessor.PostProcessor), + "docker-import": new(dockerimportpostprocessor.PostProcessor), + "docker-push": new(dockerpushpostprocessor.PostProcessor), + "docker-save": new(dockersavepostprocessor.PostProcessor), + "docker-tag": new(dockertagpostprocessor.PostProcessor), + "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor), + "manifest": new(manifestpostprocessor.PostProcessor), + "shell-local": new(shelllocalpostprocessor.PostProcessor), + "vagrant": new(vagrantpostprocessor.PostProcessor), + "vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor), + "vsphere": new(vspherepostprocessor.PostProcessor), + "vsphere-template": new(vspheretemplatepostprocessor.PostProcessor), } + var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner)-(.+)") func (c *PluginCommand) Run(args []string) int { From 8b420944c5cd86b224c4ef9041560370c15c45c9 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 17 Jan 2018 16:25:38 -0800 Subject: [PATCH 078/251] debugs --- builder/oracle/classic/config.go | 2 +- builder/oracle/classic/step_create_instance.go | 2 +- builder/oracle/classic/step_snapshot.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 10ec44c54..8cdd9aff6 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -26,7 +26,7 @@ type Config struct { // Image ImageName string 
`mapstructure:"image_name"` Shape string `mapstructure:"shape"` - ImageList string `json:"image_list"` + ImageList string `mapstructure:"image_list"` ctx interpolate.Context } diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 7de599190..c13338928 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -14,7 +14,7 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Creating Instance...") - config := state.Get("config").(Config) + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) sshPublicKey := state.Get("publicKey").(string) diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 2df8976a1..e83df4394 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -14,7 +14,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Creating Snapshot...") - config := state.Get("config").(Config) + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) instanceID := state.Get("instance_id").(string) From a8a0072049fb46b2e676e9e3d87471a53ab37fe0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 17 Jan 2018 16:33:35 -0800 Subject: [PATCH 079/251] oops need to add this moved file to git --- builder/oracle/common/ssh.go | 45 ++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 builder/oracle/common/ssh.go diff --git a/builder/oracle/common/ssh.go b/builder/oracle/common/ssh.go new file mode 100644 index 000000000..aa2780128 --- /dev/null +++ b/builder/oracle/common/ssh.go @@ -0,0 +1,45 @@ +package common + +import ( + "fmt" + + packerssh 
"github.com/hashicorp/packer/communicator/ssh" + "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" +) + +func CommHost(state multistep.StateBag) (string, error) { + ipAddress := state.Get("instance_ip").(string) + return ipAddress, nil +} + +// SSHConfig returns a function that can be used for the SSH communicator +// config for connecting to the instance created over SSH using the private key +// or password. +func SSHConfig(username, password string) func(state multistep.StateBag) (*ssh.ClientConfig, error) { + return func(state multistep.StateBag) (*ssh.ClientConfig, error) { + privateKey, hasKey := state.GetOk("privateKey") + if hasKey { + + signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string))) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + return &ssh.ClientConfig{ + User: username, + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }, nil + + } + + return &ssh.ClientConfig{ + User: username, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + Auth: []ssh.AuthMethod{ + ssh.Password(password), + ssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(password)), + }, + }, nil + } +} From 6556a851dcce4f5e494dfaaa7588ea0d5ee696a5 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 18 Jan 2018 13:44:00 -0800 Subject: [PATCH 080/251] fix ssh key handling --- builder/amazon/common/step_key_pair.go | 3 +- builder/oracle/classic/builder.go | 3 +- .../oracle/classic/step_create_instance.go | 39 ++++++++++++++++--- vendor/vendor.json | 6 +++ 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 927101fcf..55f5aace7 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "runtime" + "strings" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/packer/helper/multistep" @@ 
-36,7 +37,7 @@ func (s *StepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep } state.Put("keyPair", s.KeyPairName) - state.Put("privateKey", string(privateKeyBytes)) + state.Put("privateKey", strings.TrimSpace(string(privateKeyBytes))) return multistep.ActionContinue } diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 4501456d2..b7773ee19 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -3,6 +3,7 @@ package classic import ( "fmt" "log" + "strings" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" @@ -63,7 +64,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &ocommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), - PrivateKeyFile: b.config.Comm.SSHPrivateKey, + PrivateKeyFile: strings.TrimSpace(b.config.Comm.SSHPrivateKey), }, &stepCreateIPReservation{}, &stepCreateInstance{}, diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index c13338928..fd91ea930 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -2,6 +2,7 @@ package classic import ( "fmt" + "strings" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/packer" @@ -16,18 +17,44 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction ui.Say("Creating Instance...") config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) - sshPublicKey := state.Get("publicKey").(string) + sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) + + // Load the dynamically-generated SSH key into the Oracle Compute cloud. 
+ sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_dynamic_key", config.IdentityDomain, config.Username) + + sshKeysClient := client.SSHKeys() + sshKeysInput := compute.CreateSSHKeyInput{ + Name: sshKeyName, + Key: sshPublicKey, + Enabled: true, + } + keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) + if err != nil { + // Key already exists; update key instead + if strings.Contains(err.Error(), "packer_dynamic_key already exists") { + updateKeysInput := compute.UpdateSSHKeyInput{ + Name: sshKeyName, + Key: sshPublicKey, + Enabled: true, + } + keyInfo, err = sshKeysClient.UpdateSSHKey(&updateKeysInput) + } else { + err = fmt.Errorf("Problem adding Public SSH key through Oracle's API: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + } // get instances client instanceClient := client.Instances() // Instances Input input := &compute.CreateInstanceInput{ - Name: config.ImageName, - Shape: config.Shape, - ImageList: config.ImageList, - SSHKeys: []string{sshPublicKey}, - Attributes: map[string]interface{}{}, + Name: config.ImageName, + Shape: config.Shape, + ImageList: config.ImageList, + SSHKeys: []string{keyInfo.Name}, } instanceInfo, err := instanceClient.CreateInstance(input) diff --git a/vendor/vendor.json b/vendor/vendor.json index fe36fe283..256c2e1d2 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -785,6 +785,12 @@ "path": "github.com/hashicorp/go-cleanhttp", "revision": "875fb671b3ddc66f8e2f0acc33829c8cb989a38d" }, + { + "checksumSHA1": "1hPCerVn1bWA+9vaAGqnIkRtSFc=", + "path": "github.com/hashicorp/go-getter", + "revision": "cb2c2774e9771bd9568d843be4e59298786550fd", + "revisionTime": "2017-12-20T21:10:09Z" + }, { "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", "path": "github.com/hashicorp/go-multierror", From f208a071a4d4ce941b0fde6a2bea3b5c8d994276 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 18 Jan 2018 15:52:31 -0800 Subject: [PATCH 081/251] fix communicator --- 
builder/oracle/classic/config.go | 6 ++++++ builder/oracle/classic/step_create_instance.go | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 8cdd9aff6..733d2252c 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" ) @@ -48,5 +49,10 @@ func NewConfig(raws ...interface{}) (*Config, error) { return nil, fmt.Errorf("Error parsing API Endpoint: %s", err) } + var errs *packer.MultiError + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) + } + return c, nil } diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index fd91ea930..1456da99e 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -20,7 +20,7 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) // Load the dynamically-generated SSH key into the Oracle Compute cloud. 
- sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_dynamic_key", config.IdentityDomain, config.Username) + sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key", config.IdentityDomain, config.Username) sshKeysClient := client.SSHKeys() sshKeysInput := compute.CreateSSHKeyInput{ @@ -31,7 +31,7 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) if err != nil { // Key already exists; update key instead - if strings.Contains(err.Error(), "packer_dynamic_key already exists") { + if strings.Contains(err.Error(), "packer_generated_key already exists") { updateKeysInput := compute.UpdateSSHKeyInput{ Name: sshKeyName, Key: sshPublicKey, From 69ba710c2a1661eab9f26239d445b2666864dfb0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 19 Jan 2018 15:37:06 -0800 Subject: [PATCH 082/251] PROGRESS! Now it only fails on the snapshot step --- builder/oracle/classic/builder.go | 3 +- .../classic/step_create_IP_reservation.go | 3 ++ .../oracle/classic/step_create_instance.go | 33 +++++++++++++++---- 3 files changed, 31 insertions(+), 8 deletions(-) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index b7773ee19..4501456d2 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -3,7 +3,6 @@ package classic import ( "fmt" "log" - "strings" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" @@ -64,7 +63,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &ocommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("oci_classic_%s.pem", b.config.PackerBuildName), - PrivateKeyFile: strings.TrimSpace(b.config.Comm.SSHPrivateKey), + PrivateKeyFile: b.config.Comm.SSHPrivateKey, }, &stepCreateIPReservation{}, &stepCreateInstance{}, diff --git a/builder/oracle/classic/step_create_IP_reservation.go 
b/builder/oracle/classic/step_create_IP_reservation.go index f3f898eaa..f74dd6bb1 100644 --- a/builder/oracle/classic/step_create_IP_reservation.go +++ b/builder/oracle/classic/step_create_IP_reservation.go @@ -1,6 +1,7 @@ package classic import ( + "fmt" "log" "github.com/hashicorp/go-oracle-terraform/compute" @@ -13,12 +14,14 @@ type stepCreateIPReservation struct{} func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating IP reservation...") + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) iprClient := client.IPReservations() // TODO: add optional Name and Tags IPInput := &compute.CreateIPReservationInput{ ParentPool: compute.PublicReservationPool, Permanent: true, + Name: fmt.Sprintf("ipres_%s", config.ImageName), } ipRes, err := iprClient.CreateIPReservation(IPInput) if err != nil { diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 1456da99e..1cbfcbcc6 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -2,6 +2,7 @@ package classic import ( "fmt" + "log" "strings" "github.com/hashicorp/go-oracle-terraform/compute" @@ -17,9 +18,13 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction ui.Say("Creating Instance...") config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) + + // SSH KEY CONFIGURATION + + // grab packer-generated key from statebag context. sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) - // Load the dynamically-generated SSH key into the Oracle Compute cloud. 
+ // form API call to add key to compute cloud sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key", config.IdentityDomain, config.Username) sshKeysClient := client.SSHKeys() @@ -28,9 +33,11 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction Key: sshPublicKey, Enabled: true, } + + // Load the packer-generated SSH key into the Oracle Compute cloud. keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) if err != nil { - // Key already exists; update key instead + // Key already exists; update key instead of creating it if strings.Contains(err.Error(), "packer_generated_key already exists") { updateKeysInput := compute.UpdateSSHKeyInput{ Name: sshKeyName, @@ -46,15 +53,29 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction } } + // NETWORKING INFO CONFIGURATION + ipAddName := fmt.Sprintf("ipres_%s", config.ImageName) + log.Printf("MEGAN ipADDName is %s", ipAddName) + secListName := "Megan_packer_test" + + netInfo := compute.NetworkingInfo{ + Nat: []string{ipAddName}, + SecLists: []string{secListName}, + } + fmt.Sprintf("Megan netInfo is %#v", netInfo) + + // INSTANCE LAUNCH + // get instances client instanceClient := client.Instances() // Instances Input input := &compute.CreateInstanceInput{ - Name: config.ImageName, - Shape: config.Shape, - ImageList: config.ImageList, - SSHKeys: []string{keyInfo.Name}, + Name: config.ImageName, + Shape: config.Shape, + ImageList: config.ImageList, + SSHKeys: []string{keyInfo.Name}, + Networking: map[string]compute.NetworkingInfo{"eth0": netInfo}, } instanceInfo, err := instanceClient.CreateInstance(input) From 256382547b0f708a6bb0769d783085d4d253d1db Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 19 Jan 2018 16:00:12 -0800 Subject: [PATCH 083/251] snapshot step works --- builder/oracle/classic/step_snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/oracle/classic/step_snapshot.go 
b/builder/oracle/classic/step_snapshot.go index e83df4394..52c2557c6 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -23,7 +23,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { // Instances Input snapshotInput := &compute.CreateSnapshotInput{ - Instance: instanceID, + Instance: fmt.Sprintf("%s/%s", config.ImageName, instanceID), MachineImage: config.ImageName, } From f6c60aac787b54441053fcda79f47ca73cc31c9d Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 19 Jan 2018 16:08:42 -0800 Subject: [PATCH 084/251] clean up instance --- .../oracle/classic/step_create_instance.go | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 1cbfcbcc6..8c3edf3a3 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -92,5 +92,30 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction } func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { - // Nothing to do + // terminate instance + ui := state.Get("ui").(packer.Ui) + client := state.Get("client").(*compute.ComputeClient) + imID := state.Get("instance_id").(string) + + ui.Say(fmt.Sprintf("Terminating instance (%s)...", id)) + + instanceClient := client.Instances() + // Instances Input + input := &compute.DeleteInstanceInput{ + Name: config.ImageName, + ID: imID, + } + + instanceInfo, err := instanceClient.DeleteInstance(input) + if err != nil { + err = fmt.Errorf("Problem destroying instance: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return + } + + // TODO wait for instance state to change to deleted? 
+ + ui.Say("Terminated instance.") + return } From 89159f3a87a8c7b00dd9c13f0a713b186fc71cd0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 19 Jan 2018 16:13:36 -0800 Subject: [PATCH 085/251] fix bugs in cleanup --- builder/oracle/classic/step_create_instance.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 8c3edf3a3..f33e5d0e5 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -95,9 +95,10 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { // terminate instance ui := state.Get("ui").(packer.Ui) client := state.Get("client").(*compute.ComputeClient) + config := state.Get("config").(*Config) imID := state.Get("instance_id").(string) - ui.Say(fmt.Sprintf("Terminating instance (%s)...", id)) + ui.Say(fmt.Sprintf("Terminating instance (%s)...", imID)) instanceClient := client.Instances() // Instances Input @@ -106,7 +107,7 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { ID: imID, } - instanceInfo, err := instanceClient.DeleteInstance(input) + err := instanceClient.DeleteInstance(input) if err != nil { err = fmt.Errorf("Problem destroying instance: %s", err) ui.Error(err.Error()) From 53ff257cf0d20f57937d546e988504017e60a58d Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 22 Jan 2018 16:54:09 -0800 Subject: [PATCH 086/251] it LLIIIIIIIIIVES --- builder/oracle/classic/builder.go | 2 + .../oracle/classic/step_create_instance.go | 49 ++----------------- 2 files changed, 7 insertions(+), 44 deletions(-) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 4501456d2..7e8c07f53 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -66,6 +66,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe PrivateKeyFile: b.config.Comm.SSHPrivateKey, 
}, &stepCreateIPReservation{}, + &stepAddKeysToAPI{}, + &stepSecurity{}, &stepCreateInstance{}, &communicator.StepConnect{ Config: &b.config.Comm, diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index f33e5d0e5..bf89a51e3 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -3,7 +3,6 @@ package classic import ( "fmt" "log" - "strings" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/packer" @@ -18,53 +17,16 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction ui.Say("Creating Instance...") config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) + keyName := state.Get("key_name").(string) - // SSH KEY CONFIGURATION - - // grab packer-generated key from statebag context. - sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) - - // form API call to add key to compute cloud - sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key", config.IdentityDomain, config.Username) - - sshKeysClient := client.SSHKeys() - sshKeysInput := compute.CreateSSHKeyInput{ - Name: sshKeyName, - Key: sshPublicKey, - Enabled: true, - } - - // Load the packer-generated SSH key into the Oracle Compute cloud. 
- keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) - if err != nil { - // Key already exists; update key instead of creating it - if strings.Contains(err.Error(), "packer_generated_key already exists") { - updateKeysInput := compute.UpdateSSHKeyInput{ - Name: sshKeyName, - Key: sshPublicKey, - Enabled: true, - } - keyInfo, err = sshKeysClient.UpdateSSHKey(&updateKeysInput) - } else { - err = fmt.Errorf("Problem adding Public SSH key through Oracle's API: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt - } - } - - // NETWORKING INFO CONFIGURATION ipAddName := fmt.Sprintf("ipres_%s", config.ImageName) - log.Printf("MEGAN ipADDName is %s", ipAddName) - secListName := "Megan_packer_test" + // secListName := "Megan_packer_test" // hack to get working; fix b4 release + secListName := state.Get("security_list").(string) netInfo := compute.NetworkingInfo{ Nat: []string{ipAddName}, SecLists: []string{secListName}, } - fmt.Sprintf("Megan netInfo is %#v", netInfo) - - // INSTANCE LAUNCH // get instances client instanceClient := client.Instances() @@ -74,7 +36,7 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction Name: config.ImageName, Shape: config.Shape, ImageList: config.ImageList, - SSHKeys: []string{keyInfo.Name}, + SSHKeys: []string{keyName}, Networking: map[string]compute.NetworkingInfo{"eth0": netInfo}, } @@ -106,6 +68,7 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { Name: config.ImageName, ID: imID, } + log.Printf("instance destroy input is %#v", input) err := instanceClient.DeleteInstance(input) if err != nil { @@ -114,9 +77,7 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { state.Put("error", err) return } - // TODO wait for instance state to change to deleted? 
- ui.Say("Terminated instance.") return } From 531cb2244d8749c80b1ee07fad2595b175b01133 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 22 Jan 2018 16:54:49 -0800 Subject: [PATCH 087/251] add separated out steps --- .../oracle/classic/step_add_keys_to_api.go | 58 +++++++++++++++ builder/oracle/classic/step_security.go | 70 +++++++++++++++++++ 2 files changed, 128 insertions(+) create mode 100644 builder/oracle/classic/step_add_keys_to_api.go create mode 100644 builder/oracle/classic/step_security.go diff --git a/builder/oracle/classic/step_add_keys_to_api.go b/builder/oracle/classic/step_add_keys_to_api.go new file mode 100644 index 000000000..f28a9bce9 --- /dev/null +++ b/builder/oracle/classic/step_add_keys_to_api.go @@ -0,0 +1,58 @@ +package classic + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepAddKeysToAPI struct{} + +func (s *stepAddKeysToAPI) Run(state multistep.StateBag) multistep.StepAction { + // get variables from state + ui := state.Get("ui").(packer.Ui) + ui.Say("Adding SSH keys to API...") + config := state.Get("config").(*Config) + client := state.Get("client").(*compute.ComputeClient) + + // grab packer-generated key from statebag context. + sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) + + // form API call to add key to compute cloud + sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key", config.IdentityDomain, config.Username) + + sshKeysClient := client.SSHKeys() + sshKeysInput := compute.CreateSSHKeyInput{ + Name: sshKeyName, + Key: sshPublicKey, + Enabled: true, + } + + // Load the packer-generated SSH key into the Oracle Compute cloud. 
+ keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) + if err != nil { + // Key already exists; update key instead of creating it + if strings.Contains(err.Error(), "packer_generated_key already exists") { + updateKeysInput := compute.UpdateSSHKeyInput{ + Name: sshKeyName, + Key: sshPublicKey, + Enabled: true, + } + keyInfo, err = sshKeysClient.UpdateSSHKey(&updateKeysInput) + } else { + err = fmt.Errorf("Problem adding Public SSH key through Oracle's API: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + } + state.Put("key_name", keyInfo.Name) + return multistep.ActionContinue +} + +func (s *stepAddKeysToAPI) Cleanup(state multistep.StateBag) { + // Nothing to do +} diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go new file mode 100644 index 000000000..c907ceb7f --- /dev/null +++ b/builder/oracle/classic/step_security.go @@ -0,0 +1,70 @@ +package classic + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" +) + +type stepSecurity struct{} + +func (s *stepSecurity) Run(state multistep.StateBag) multistep.StepAction { + // TODO create overrides that allow savvy users to add the image to their + // own security lists instead of ours + ui := state.Get("ui").(packer.Ui) + ui.Say("Configuring security lists and rules...") + config := state.Get("config").(*Config) + client := state.Get("client").(*compute.ComputeClient) + + secListName := fmt.Sprintf("/Compute-%s/%s/Packer_SSH_Allow", + config.IdentityDomain, config.Username) + secListClient := client.SecurityLists() + secListInput := compute.CreateSecurityListInput{ + Description: "Packer-generated security list to give packer ssh access", + Name: secListName, + } + _, err := secListClient.CreateSecurityList(&secListInput) + if err != nil { + if !strings.Contains(err.Error(), "already exists") { + err = 
fmt.Errorf("Error creating security security IP List to"+ + " allow Packer to connect to Oracle instance via SSH: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + } + secListURI := fmt.Sprintf("%s/seclist/Compute-%s/%s/Packer_SSH_Allow", + config.APIEndpoint, config.IdentityDomain, config.Username) + log.Printf("Megan secListURI is %s", secListURI) + // DOCS NOTE: user must have Compute_Operations role + // Create security rule that allows Packer to connect via SSH + + secRulesClient := client.SecRules() + secRulesInput := compute.CreateSecRuleInput{ + Action: "PERMIT", + Application: "/oracle/public/ssh", + Description: "Packer-generated security rule to allow ssh", + DestinationList: fmt.Sprintf("seclist:%s", secListName), + Name: "Packer-allow-SSH-Rule", + SourceList: "seciplist:/oracle/public/public-internet", + } + + _, err = secRulesClient.CreateSecRule(&secRulesInput) + if err != nil { + err = fmt.Errorf("Error creating security rule to allow Packer to connect to Oracle instance via SSH: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + + state.Put("security_list", secListName) + return multistep.ActionContinue +} + +func (s *stepSecurity) Cleanup(state multistep.StateBag) { + // Nothing to do +} From 7d23cfae0af760cbe6f1a4481387d81216c0a38e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 23 Jan 2018 11:07:04 -0800 Subject: [PATCH 088/251] allow user to add a security list for SSH access; add cleanup for packer-generated rules and lists --- builder/oracle/classic/config.go | 9 ++++ builder/oracle/classic/step_security.go | 59 +++++++++++++++++-------- 2 files changed, 50 insertions(+), 18 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 733d2252c..eb3fd6876 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -28,6 +28,11 @@ type Config struct { ImageName string 
`mapstructure:"image_name"` Shape string `mapstructure:"shape"` ImageList string `mapstructure:"image_list"` + // Optional. Describes what computers are allowed to reach your instance + // via SSH. This whitelist must contain the computer you're running Packer + // from. It defaults to public-internet, meaning that you can SSH into your + // instance from anywhere as long as you have the right keys + SSHSourceList string `mapstructure:"ssh_source_list"` ctx interpolate.Context } @@ -48,6 +53,10 @@ func NewConfig(raws ...interface{}) (*Config, error) { if err != nil { return nil, fmt.Errorf("Error parsing API Endpoint: %s", err) } + // set default source list + if c.SSHSourceList == "" { + c.SSHSourceList = "seciplist:/oracle/public/public-internet" + } var errs *packer.MultiError if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index c907ceb7f..d82f2bc41 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -13,15 +13,15 @@ import ( type stepSecurity struct{} func (s *stepSecurity) Run(state multistep.StateBag) multistep.StepAction { - // TODO create overrides that allow savvy users to add the image to their - // own security lists instead of ours ui := state.Get("ui").(packer.Ui) - ui.Say("Configuring security lists and rules...") + + ui.Say("Configuring security lists and rules to enable SSH access...") + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) - secListName := fmt.Sprintf("/Compute-%s/%s/Packer_SSH_Allow", - config.IdentityDomain, config.Username) + secListName := fmt.Sprintf("/Compute-%s/%s/Packer_SSH_Allow_%s", + config.IdentityDomain, config.Username, config.ImageName) secListClient := client.SecurityLists() secListInput := compute.CreateSecurityListInput{ Description: "Packer-generated security list to give packer ssh access", @@ -30,41 +30,64 @@ func (s 
*stepSecurity) Run(state multistep.StateBag) multistep.StepAction { _, err := secListClient.CreateSecurityList(&secListInput) if err != nil { if !strings.Contains(err.Error(), "already exists") { - err = fmt.Errorf("Error creating security security IP List to"+ + err = fmt.Errorf("Error creating security List to"+ " allow Packer to connect to Oracle instance via SSH: %s", err) ui.Error(err.Error()) state.Put("error", err) return multistep.ActionHalt } } - secListURI := fmt.Sprintf("%s/seclist/Compute-%s/%s/Packer_SSH_Allow", - config.APIEndpoint, config.IdentityDomain, config.Username) - log.Printf("Megan secListURI is %s", secListURI) // DOCS NOTE: user must have Compute_Operations role // Create security rule that allows Packer to connect via SSH - secRulesClient := client.SecRules() secRulesInput := compute.CreateSecRuleInput{ Action: "PERMIT", Application: "/oracle/public/ssh", Description: "Packer-generated security rule to allow ssh", DestinationList: fmt.Sprintf("seclist:%s", secListName), - Name: "Packer-allow-SSH-Rule", - SourceList: "seciplist:/oracle/public/public-internet", + Name: fmt.Sprintf("Packer-allow-SSH-Rule_%s", config.ImageName), + SourceList: config.SSHSourceList, } + secRuleName := fmt.Sprintf("/Compute-%s/%s/Packer-allow-SSH-Rule_%s", + config.IdentityDomain, config.Username, config.ImageName) _, err = secRulesClient.CreateSecRule(&secRulesInput) if err != nil { - err = fmt.Errorf("Error creating security rule to allow Packer to connect to Oracle instance via SSH: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt + log.Printf(err.Error()) + if !strings.Contains(err.Error(), "already exists") { + err = fmt.Errorf("Error creating security rule to"+ + " allow Packer to connect to Oracle instance via SSH: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } } - + state.Put("security_rule_name", secRuleName) state.Put("security_list", secListName) return 
multistep.ActionContinue } func (s *stepSecurity) Cleanup(state multistep.StateBag) { - // Nothing to do + client := state.Get("client").(*compute.ComputeClient) + ui := state.Get("ui").(packer.Ui) + ui.Say("Deleting the packer-generated security rules and lists...") + // delete security list that Packer generated + secListName := state.Get("security_list").(string) + secListClient := client.SecurityLists() + input := compute.DeleteSecurityListInput{Name: secListName} + err := secListClient.DeleteSecurityList(&input) + if err != nil { + ui.Say(fmt.Sprintf("Error deleting the packer-generated security list %s; "+ + "please delete manually. (error : %s)", secListName, err.Error())) + } + // delete security rules that Packer generated + secRuleName := state.Get("security_rule_name").(string) + secRulesClient := client.SecRules() + ruleInput := compute.DeleteSecRuleInput{Name: secRuleName} + err = secRulesClient.DeleteSecRule(&ruleInput) + if err != nil { + ui.Say(fmt.Sprintf("Error deleting the packer-generated security rule %s; "+ + "please delete manually. 
(error: %s)", secRuleName, err.Error())) + } + return } From b9a90b9261708d66ca974ab60596435c093190f7 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 23 Jan 2018 14:16:51 -0800 Subject: [PATCH 089/251] Check for error when creating ip reso --- ...ate_IP_reservation.go => step_create_ip_reservation.go} | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) rename builder/oracle/classic/{step_create_IP_reservation.go => step_create_ip_reservation.go} (87%) diff --git a/builder/oracle/classic/step_create_IP_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go similarity index 87% rename from builder/oracle/classic/step_create_IP_reservation.go rename to builder/oracle/classic/step_create_ip_reservation.go index f74dd6bb1..733c07e70 100644 --- a/builder/oracle/classic/step_create_IP_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -24,8 +24,11 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc Name: fmt.Sprintf("ipres_%s", config.ImageName), } ipRes, err := iprClient.CreateIPReservation(IPInput) + if err != nil { - log.Printf("Error creating IP Reservation: %s", err) + err := fmt.Errorf("Error creating IP Reservation: %s", err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } state.Put("instance_ip", ipRes.IP) @@ -34,5 +37,5 @@ func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAc } func (s *stepCreateIPReservation) Cleanup(state multistep.StateBag) { - // Nothing to do + // TODO: delete ip reservation } From 44befb08576804e64a768bcb69aca1694bb668d4 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 23 Jan 2018 17:33:03 -0800 Subject: [PATCH 090/251] rename --- .../oracle/classic/{step_add_keys_to_api.go => step_add_keys.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename builder/oracle/classic/{step_add_keys_to_api.go => step_add_keys.go} (100%) diff --git a/builder/oracle/classic/step_add_keys_to_api.go 
b/builder/oracle/classic/step_add_keys.go similarity index 100% rename from builder/oracle/classic/step_add_keys_to_api.go rename to builder/oracle/classic/step_add_keys.go From 603881d9902e105d14a90565cedceb18b734de0e Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 23 Jan 2018 18:13:51 -0800 Subject: [PATCH 091/251] add oci/classic artifact --- builder/oracle/classic/artifact.go | 28 ++++++++++++++++++++----- builder/oracle/classic/builder.go | 16 ++++++-------- builder/oracle/classic/step_snapshot.go | 1 + 3 files changed, 30 insertions(+), 15 deletions(-) diff --git a/builder/oracle/classic/artifact.go b/builder/oracle/classic/artifact.go index ff01b186f..bf1fed0ba 100644 --- a/builder/oracle/classic/artifact.go +++ b/builder/oracle/classic/artifact.go @@ -1,7 +1,15 @@ package classic -// Artifact is an artifact implementation that contains a built Custom Image. +import ( + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" +) + +// Artifact is an artifact implementation that contains a Snapshot. type Artifact struct { + Snapshot *compute.Snapshot + driver *compute.ComputeClient } // BuilderId uniquely identifies the builder. @@ -15,13 +23,17 @@ func (a *Artifact) Files() []string { return nil } -// Id returns the OCID of the associated Image. func (a *Artifact) Id() string { - return "" + return a.Snapshot.Name } func (a *Artifact) String() string { - return "" + return fmt.Sprintf("A Snapshot was created: \n"+ + "Name: %s\n"+ + "Instance: %s\n"+ + "MachineImage: %s\n"+ + "URI: %s", + a.Snapshot.Name, a.Snapshot.Instance, a.Snapshot.MachineImage, a.Snapshot.URI) } func (a *Artifact) State(name string) interface{} { @@ -30,5 +42,11 @@ func (a *Artifact) State(name string) interface{} { // Destroy deletes the custom image associated with the artifact. 
func (a *Artifact) Destroy() error { - return nil + client := a.driver.Snapshots() + mic := a.driver.MachineImages() + input := &compute.DeleteSnapshotInput{ + Snapshot: a.Snapshot.Name, + MachineImage: a.Snapshot.MachineImage, + } + return client.DeleteSnapshot(mic, input) } diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 7e8c07f53..7847ef179 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -89,17 +89,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, rawErr.(error) } - /* - // Build the artifact and return it - artifact := &Artifact{ - Image: state.Get("image").(client.Image), - Region: b.config.AccessCfg.Region, - driver: driver, - } + // Build the artifact and return it + artifact := &Artifact{ + Snapshot: state.Get("snapshot").(*compute.Snapshot), + driver: client, + } - return artifact, nil - */ - return nil, nil + return artifact, nil } // Cancel terminates a running build. 
diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 52c2557c6..2e4e9d7a3 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -35,6 +35,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } + state.Put("snapshot", snap) ui.Say(fmt.Sprintf("Created snapshot (%s).", snap.Name)) return multistep.ActionContinue } From 76ea73c5b208fd37d809782d5c12b5f44abb793d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 23 Jan 2018 18:14:10 -0800 Subject: [PATCH 092/251] I don't think we need to delete this artifact right now --- builder/oracle/classic/artifact.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/builder/oracle/classic/artifact.go b/builder/oracle/classic/artifact.go index bf1fed0ba..fb7861b04 100644 --- a/builder/oracle/classic/artifact.go +++ b/builder/oracle/classic/artifact.go @@ -42,11 +42,5 @@ func (a *Artifact) State(name string) interface{} { // Destroy deletes the custom image associated with the artifact. 
func (a *Artifact) Destroy() error { - client := a.driver.Snapshots() - mic := a.driver.MachineImages() - input := &compute.DeleteSnapshotInput{ - Snapshot: a.Snapshot.Name, - MachineImage: a.Snapshot.MachineImage, - } - return client.DeleteSnapshot(mic, input) + return nil } From 1fffbacdd3197b17aac4ebed79ebc53b18847b88 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 25 Jan 2018 09:17:30 -0800 Subject: [PATCH 093/251] fix ordering of deleting security rules and lists --- .../oracle/classic/step_create_instance.go | 1 - builder/oracle/classic/step_security.go | 23 +++++++++++-------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index bf89a51e3..88c108466 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -20,7 +20,6 @@ func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction keyName := state.Get("key_name").(string) ipAddName := fmt.Sprintf("ipres_%s", config.ImageName) - // secListName := "Megan_packer_test" // hack to get working; fix b4 release secListName := state.Get("security_list").(string) netInfo := compute.NetworkingInfo{ diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index d82f2bc41..cc965caeb 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -71,23 +71,26 @@ func (s *stepSecurity) Cleanup(state multistep.StateBag) { client := state.Get("client").(*compute.ComputeClient) ui := state.Get("ui").(packer.Ui) ui.Say("Deleting the packer-generated security rules and lists...") - // delete security list that Packer generated - secListName := state.Get("security_list").(string) - secListClient := client.SecurityLists() - input := compute.DeleteSecurityListInput{Name: secListName} - err := secListClient.DeleteSecurityList(&input) - if err != nil { - 
ui.Say(fmt.Sprintf("Error deleting the packer-generated security list %s; "+ - "please delete manually. (error : %s)", secListName, err.Error())) - } + // delete security rules that Packer generated secRuleName := state.Get("security_rule_name").(string) secRulesClient := client.SecRules() ruleInput := compute.DeleteSecRuleInput{Name: secRuleName} - err = secRulesClient.DeleteSecRule(&ruleInput) + err := secRulesClient.DeleteSecRule(&ruleInput) if err != nil { ui.Say(fmt.Sprintf("Error deleting the packer-generated security rule %s; "+ "please delete manually. (error: %s)", secRuleName, err.Error())) } + + // delete security list that Packer generated + secListName := state.Get("security_list").(string) + secListClient := client.SecurityLists() + input := compute.DeleteSecurityListInput{Name: secListName} + err = secListClient.DeleteSecurityList(&input) + if err != nil { + ui.Say(fmt.Sprintf("Error deleting the packer-generated security list %s; "+ + "please delete manually. (error : %s)", secListName, err.Error())) + } + return } From 00db189c9c4b6872d7a0baf3031fbc9f2a3d1555 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 25 Jan 2018 14:00:26 -0800 Subject: [PATCH 094/251] add docs page --- .../oracle/classic/step_create_instance.go | 1 - .../docs/builders/oracle-classic.html.md | 98 +++++++++++++++++++ website/source/layouts/docs.erb | 3 + 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 website/source/docs/builders/oracle-classic.html.md diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 88c108466..8a96823aa 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -62,7 +62,6 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { ui.Say(fmt.Sprintf("Terminating instance (%s)...", imID)) instanceClient := client.Instances() - // Instances Input input := &compute.DeleteInstanceInput{ Name: 
config.ImageName, ID: imID, diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md new file mode 100644 index 000000000..08d3d10b1 --- /dev/null +++ b/website/source/docs/builders/oracle-classic.html.md @@ -0,0 +1,98 @@ +--- +description: + The oracle-classic builder is able to create new custom images for use with Oracle + Compute Cloud. +layout: docs +page_title: 'Oracle Classic - Builders' +sidebar_current: 'docs-builders-oracle-classic' +--- + +# Oracle Compute Cloud (Classic) Builder + +Type: `oracle-classic` + +The `oracle-classic` Packer builder is able to create custom images for use +with [Oracle Compute Cloud](https://cloud.oracle.com/compute-classic). The builder +takes a base image, runs any provisioning necessary on the base image after +launching it, and finally snapshots it creating a reusable custom image. + +It is recommended that you familiarise yourself with the +[Key Concepts and Terminology](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/terminology.html) +prior to using this builder if you have not done so already. + +The builder _does not_ manage images. Once it creates an image, it is up to you +to use it or delete it. + +## Authorization + +This builder authenticates API calls to Compute Classic using basic +authentication (user name and password). +To read more, see the [authentication documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/Authentication.html) + +## Configuration Reference + +There are many configuration options available for the `oracle-classic` builder. +This builder currently only works with the SSH communicator. + +### Required + + - `api_endpoint` (string) - This is your custom API endpoint for sending + requests. 
Instructions for determining your API endpoint can be found + [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/SendRequests.html) + + - `identity_domain` (string) - This is your customer-specific identity domain + as generated by Oracle. If you don't know what your identity domain is, ask + your account administrator. For a little more information, see the Oracle + [documentation](https://docs.oracle.com/en/cloud/get-started/subscriptions-cloud/ocuid/identity-domain-overview.html#GUID-7969F881-5F4D-443E-B86C-9044C8085B8A). + + - `image_list` (string) - This is what image you want to use as your base image. + See the [documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/listing-machine-images.html) + for more details. To see what public image lists are available, you can use + the CLI, as described [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stopc/image-lists-stclr-and-nmcli.html#GUID-DB7E75FE-F752-4FF7-AB70-3C8DCDFCA0FA) + + - `password` (string) - Your account password. + + - `shape` (string) - The template that determines the number of + CPUs, amount of memory, and other resources allocated to a newly created + instance. For more information about shapes, see the documentation [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/machine-images-and-shapes.html). + + - `username` (string) - Your account username. + +### Optional + + - `ssh_username` (string) - The username that Packer will use to SSH into the + instance; defaults to `opc`, the default oracle user, which has sudo + priveliges. If you have already configured users on your machine, you may + prompt Packer to use one of those instead. For more detail, see the + [documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/accessing-oracle-linux-instance-using-ssh.html). + + - `image_name` (string) - The name to assign to the resulting custom image. + +## Basic Example + +Here is a basic example. 
Note that account specific configuration has been +obfuscated; you will need to add a working `username`, `password`, +`identity_domain`, and `api_endpoint` in order for the example to work. + +``` {.json} +{ + "builders": [ + { + "type": "oracle-classic", + "username": "myuser@myaccount.com", + "password": "supersecretpasswordhere", + "identity_domain": "#######", + "api_endpoint": "https://api-###.compute.###.oraclecloud.com/", + "image_list": "/oracle/public/OL_7.2_UEKR4_x86_64", + "shape": "oc3", + "image_name": "Packer_Builder_Test_{{timestamp}}" + } + ], + "provisioners": [ + { + "type": "shell", + "inline": ["echo hello"] + } + ] +} +``` diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9aa17ccc4..739b33733 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -131,6 +131,9 @@ > OpenStack + > + Oracle Classic + > Oracle OCI From 7d85b31b29fc116cc5c0945b11b568f3865efad2 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 25 Jan 2018 14:01:34 -0800 Subject: [PATCH 095/251] make fmt --- command/plugin.go | 176 ++++++++++++++++++++++------------------------ 1 file changed, 86 insertions(+), 90 deletions(-) diff --git a/command/plugin.go b/command/plugin.go index 4f4228fc3..91b052340 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -14,39 +14,21 @@ import ( "github.com/hashicorp/packer/packer/plugin" alicloudecsbuilder "github.com/hashicorp/packer/builder/alicloud/ecs" - alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import" amazonchrootbuilder "github.com/hashicorp/packer/builder/amazon/chroot" amazonebsbuilder "github.com/hashicorp/packer/builder/amazon/ebs" amazonebssurrogatebuilder "github.com/hashicorp/packer/builder/amazon/ebssurrogate" amazonebsvolumebuilder "github.com/hashicorp/packer/builder/amazon/ebsvolume" - amazonimportpostprocessor "github.com/hashicorp/packer/post-processor/amazon-import" amazoninstancebuilder 
"github.com/hashicorp/packer/builder/amazon/instance" - ansiblelocalprovisioner "github.com/hashicorp/packer/provisioner/ansible-local" - ansibleprovisioner "github.com/hashicorp/packer/provisioner/ansible" - artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice" - atlaspostprocessor "github.com/hashicorp/packer/post-processor/atlas" azurearmbuilder "github.com/hashicorp/packer/builder/azure/arm" - checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum" - chefclientprovisioner "github.com/hashicorp/packer/provisioner/chef-client" - chefsoloprovisioner "github.com/hashicorp/packer/provisioner/chef-solo" cloudstackbuilder "github.com/hashicorp/packer/builder/cloudstack" - compresspostprocessor "github.com/hashicorp/packer/post-processor/compress" - convergeprovisioner "github.com/hashicorp/packer/provisioner/converge" digitaloceanbuilder "github.com/hashicorp/packer/builder/digitalocean" dockerbuilder "github.com/hashicorp/packer/builder/docker" - dockerimportpostprocessor "github.com/hashicorp/packer/post-processor/docker-import" - dockerpushpostprocessor "github.com/hashicorp/packer/post-processor/docker-push" - dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save" - dockertagpostprocessor "github.com/hashicorp/packer/post-processor/docker-tag" filebuilder "github.com/hashicorp/packer/builder/file" - fileprovisioner "github.com/hashicorp/packer/provisioner/file" googlecomputebuilder "github.com/hashicorp/packer/builder/googlecompute" - googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export" hypervisobuilder "github.com/hashicorp/packer/builder/hyperv/iso" hypervvmcxbuilder "github.com/hashicorp/packer/builder/hyperv/vmcx" lxcbuilder "github.com/hashicorp/packer/builder/lxc" lxdbuilder "github.com/hashicorp/packer/builder/lxd" - manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest" nullbuilder "github.com/hashicorp/packer/builder/null" 
oneandonebuilder "github.com/hashicorp/packer/builder/oneandone" openstackbuilder "github.com/hashicorp/packer/builder/openstack" @@ -54,27 +36,44 @@ import ( oracleocibuilder "github.com/hashicorp/packer/builder/oracle/oci" parallelsisobuilder "github.com/hashicorp/packer/builder/parallels/iso" parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" - powershellprovisioner "github.com/hashicorp/packer/provisioner/powershell" profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" - puppetmasterlessprovisioner "github.com/hashicorp/packer/provisioner/puppet-masterless" - puppetserverprovisioner "github.com/hashicorp/packer/provisioner/puppet-server" qemubuilder "github.com/hashicorp/packer/builder/qemu" - saltmasterlessprovisioner "github.com/hashicorp/packer/provisioner/salt-masterless" - shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local" - shelllocalprovisioner "github.com/hashicorp/packer/provisioner/shell-local" - shellprovisioner "github.com/hashicorp/packer/provisioner/shell" tritonbuilder "github.com/hashicorp/packer/builder/triton" - vagrantcloudpostprocessor "github.com/hashicorp/packer/post-processor/vagrant-cloud" - vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant" virtualboxisobuilder "github.com/hashicorp/packer/builder/virtualbox/iso" virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf" vmwareisobuilder "github.com/hashicorp/packer/builder/vmware/iso" vmwarevmxbuilder "github.com/hashicorp/packer/builder/vmware/vmx" + alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import" + amazonimportpostprocessor "github.com/hashicorp/packer/post-processor/amazon-import" + artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice" + atlaspostprocessor "github.com/hashicorp/packer/post-processor/atlas" + checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum" + compresspostprocessor 
"github.com/hashicorp/packer/post-processor/compress" + dockerimportpostprocessor "github.com/hashicorp/packer/post-processor/docker-import" + dockerpushpostprocessor "github.com/hashicorp/packer/post-processor/docker-push" + dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save" + dockertagpostprocessor "github.com/hashicorp/packer/post-processor/docker-tag" + googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export" + manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest" + shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local" + vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant" + vagrantcloudpostprocessor "github.com/hashicorp/packer/post-processor/vagrant-cloud" vspherepostprocessor "github.com/hashicorp/packer/post-processor/vsphere" vspheretemplatepostprocessor "github.com/hashicorp/packer/post-processor/vsphere-template" + ansibleprovisioner "github.com/hashicorp/packer/provisioner/ansible" + ansiblelocalprovisioner "github.com/hashicorp/packer/provisioner/ansible-local" + chefclientprovisioner "github.com/hashicorp/packer/provisioner/chef-client" + chefsoloprovisioner "github.com/hashicorp/packer/provisioner/chef-solo" + convergeprovisioner "github.com/hashicorp/packer/provisioner/converge" + fileprovisioner "github.com/hashicorp/packer/provisioner/file" + powershellprovisioner "github.com/hashicorp/packer/provisioner/powershell" + puppetmasterlessprovisioner "github.com/hashicorp/packer/provisioner/puppet-masterless" + puppetserverprovisioner "github.com/hashicorp/packer/provisioner/puppet-server" + saltmasterlessprovisioner "github.com/hashicorp/packer/provisioner/salt-masterless" + shellprovisioner "github.com/hashicorp/packer/provisioner/shell" + shelllocalprovisioner "github.com/hashicorp/packer/provisioner/shell-local" windowsrestartprovisioner "github.com/hashicorp/packer/provisioner/windows-restart" 
windowsshellprovisioner "github.com/hashicorp/packer/provisioner/windows-shell" - ) type PluginCommand struct { @@ -82,78 +81,75 @@ type PluginCommand struct { } var Builders = map[string]packer.Builder{ - "alicloud-ecs": new(alicloudecsbuilder.Builder), - "amazon-chroot": new(amazonchrootbuilder.Builder), - "amazon-ebs": new(amazonebsbuilder.Builder), - "amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder), - "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder), - "amazon-instance": new(amazoninstancebuilder.Builder), - "azure-arm": new(azurearmbuilder.Builder), - "cloudstack": new(cloudstackbuilder.Builder), - "digitalocean": new(digitaloceanbuilder.Builder), - "docker": new(dockerbuilder.Builder), - "file": new(filebuilder.Builder), - "googlecompute": new(googlecomputebuilder.Builder), - "hyperv-iso": new(hypervisobuilder.Builder), - "hyperv-vmcx": new(hypervvmcxbuilder.Builder), - "lxc": new(lxcbuilder.Builder), - "lxd": new(lxdbuilder.Builder), - "null": new(nullbuilder.Builder), - "oneandone": new(oneandonebuilder.Builder), - "openstack": new(openstackbuilder.Builder), - "oracle-classic": new(oracleclassicbuilder.Builder), - "oracle-oci": new(oracleocibuilder.Builder), - "parallels-iso": new(parallelsisobuilder.Builder), - "parallels-pvm": new(parallelspvmbuilder.Builder), - "profitbricks": new(profitbricksbuilder.Builder), - "qemu": new(qemubuilder.Builder), - "triton": new(tritonbuilder.Builder), - "virtualbox-iso": new(virtualboxisobuilder.Builder), - "virtualbox-ovf": new(virtualboxovfbuilder.Builder), - "vmware-iso": new(vmwareisobuilder.Builder), - "vmware-vmx": new(vmwarevmxbuilder.Builder), + "alicloud-ecs": new(alicloudecsbuilder.Builder), + "amazon-chroot": new(amazonchrootbuilder.Builder), + "amazon-ebs": new(amazonebsbuilder.Builder), + "amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder), + "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder), + "amazon-instance": new(amazoninstancebuilder.Builder), + "azure-arm": 
new(azurearmbuilder.Builder), + "cloudstack": new(cloudstackbuilder.Builder), + "digitalocean": new(digitaloceanbuilder.Builder), + "docker": new(dockerbuilder.Builder), + "file": new(filebuilder.Builder), + "googlecompute": new(googlecomputebuilder.Builder), + "hyperv-iso": new(hypervisobuilder.Builder), + "hyperv-vmcx": new(hypervvmcxbuilder.Builder), + "lxc": new(lxcbuilder.Builder), + "lxd": new(lxdbuilder.Builder), + "null": new(nullbuilder.Builder), + "oneandone": new(oneandonebuilder.Builder), + "openstack": new(openstackbuilder.Builder), + "oracle-classic": new(oracleclassicbuilder.Builder), + "oracle-oci": new(oracleocibuilder.Builder), + "parallels-iso": new(parallelsisobuilder.Builder), + "parallels-pvm": new(parallelspvmbuilder.Builder), + "profitbricks": new(profitbricksbuilder.Builder), + "qemu": new(qemubuilder.Builder), + "triton": new(tritonbuilder.Builder), + "virtualbox-iso": new(virtualboxisobuilder.Builder), + "virtualbox-ovf": new(virtualboxovfbuilder.Builder), + "vmware-iso": new(vmwareisobuilder.Builder), + "vmware-vmx": new(vmwarevmxbuilder.Builder), } - var Provisioners = map[string]packer.Provisioner{ - "ansible": new(ansibleprovisioner.Provisioner), - "ansible-local": new(ansiblelocalprovisioner.Provisioner), - "chef-client": new(chefclientprovisioner.Provisioner), - "chef-solo": new(chefsoloprovisioner.Provisioner), - "converge": new(convergeprovisioner.Provisioner), - "file": new(fileprovisioner.Provisioner), - "powershell": new(powershellprovisioner.Provisioner), - "puppet-masterless": new(puppetmasterlessprovisioner.Provisioner), - "puppet-server": new(puppetserverprovisioner.Provisioner), + "ansible": new(ansibleprovisioner.Provisioner), + "ansible-local": new(ansiblelocalprovisioner.Provisioner), + "chef-client": new(chefclientprovisioner.Provisioner), + "chef-solo": new(chefsoloprovisioner.Provisioner), + "converge": new(convergeprovisioner.Provisioner), + "file": new(fileprovisioner.Provisioner), + "powershell": 
new(powershellprovisioner.Provisioner), + "puppet-masterless": new(puppetmasterlessprovisioner.Provisioner), + "puppet-server": new(puppetserverprovisioner.Provisioner), "salt-masterless": new(saltmasterlessprovisioner.Provisioner), - "shell": new(shellprovisioner.Provisioner), - "shell-local": new(shelllocalprovisioner.Provisioner), + "shell": new(shellprovisioner.Provisioner), + "shell-local": new(shelllocalprovisioner.Provisioner), "windows-restart": new(windowsrestartprovisioner.Provisioner), - "windows-shell": new(windowsshellprovisioner.Provisioner), + "windows-shell": new(windowsshellprovisioner.Provisioner), } - var PostProcessors = map[string]packer.PostProcessor{ - "alicloud-import": new(alicloudimportpostprocessor.PostProcessor), - "amazon-import": new(amazonimportpostprocessor.PostProcessor), - "artifice": new(artificepostprocessor.PostProcessor), - "atlas": new(atlaspostprocessor.PostProcessor), - "checksum": new(checksumpostprocessor.PostProcessor), - "compress": new(compresspostprocessor.PostProcessor), - "docker-import": new(dockerimportpostprocessor.PostProcessor), - "docker-push": new(dockerpushpostprocessor.PostProcessor), - "docker-save": new(dockersavepostprocessor.PostProcessor), - "docker-tag": new(dockertagpostprocessor.PostProcessor), - "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor), - "manifest": new(manifestpostprocessor.PostProcessor), - "shell-local": new(shelllocalpostprocessor.PostProcessor), - "vagrant": new(vagrantpostprocessor.PostProcessor), - "vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor), - "vsphere": new(vspherepostprocessor.PostProcessor), - "vsphere-template": new(vspheretemplatepostprocessor.PostProcessor), + "alicloud-import": new(alicloudimportpostprocessor.PostProcessor), + "amazon-import": new(amazonimportpostprocessor.PostProcessor), + "artifice": new(artificepostprocessor.PostProcessor), + "atlas": new(atlaspostprocessor.PostProcessor), + "checksum": 
new(checksumpostprocessor.PostProcessor), + "compress": new(compresspostprocessor.PostProcessor), + "docker-import": new(dockerimportpostprocessor.PostProcessor), + "docker-push": new(dockerpushpostprocessor.PostProcessor), + "docker-save": new(dockersavepostprocessor.PostProcessor), + "docker-tag": new(dockertagpostprocessor.PostProcessor), + "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor), + "manifest": new(manifestpostprocessor.PostProcessor), + "shell-local": new(shelllocalpostprocessor.PostProcessor), + "vagrant": new(vagrantpostprocessor.PostProcessor), + "vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor), + "vsphere": new(vspherepostprocessor.PostProcessor), + "vsphere-template": new(vspheretemplatepostprocessor.PostProcessor), } - var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner)-(.+)") func (c *PluginCommand) Run(args []string) int { From dd2384483bb37fcb51e15b0ffb16cfe7bf844811 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 14:42:39 -0800 Subject: [PATCH 096/251] add context to steps --- builder/oracle/classic/step_add_keys.go | 2 +- builder/oracle/classic/step_create_instance.go | 2 +- builder/oracle/classic/step_create_ip_reservation.go | 2 +- builder/oracle/classic/step_security.go | 2 +- builder/oracle/classic/step_snapshot.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index f28a9bce9..accb613de 100644 --- a/builder/oracle/classic/step_add_keys.go +++ b/builder/oracle/classic/step_add_keys.go @@ -11,7 +11,7 @@ import ( type stepAddKeysToAPI struct{} -func (s *stepAddKeysToAPI) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Adding SSH keys to API...") diff --git 
a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 8a96823aa..65b7d8736 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -11,7 +11,7 @@ import ( type stepCreateInstance struct{} -func (s *stepCreateInstance) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Creating Instance...") diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index 733c07e70..d57aa41ea 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -11,7 +11,7 @@ import ( type stepCreateIPReservation struct{} -func (s *stepCreateIPReservation) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating IP reservation...") config := state.Get("config").(*Config) diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index cc965caeb..c3184f36d 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -12,7 +12,7 @@ import ( type stepSecurity struct{} -func (s *stepSecurity) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepSecurity) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Configuring security lists and rules to enable SSH access...") diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 2e4e9d7a3..5fa838e0e 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ 
-10,7 +10,7 @@ import ( type stepSnapshot struct{} -func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Creating Snapshot...") From 6dc0bd759ad1eae8ffeec67a3fe916d7f1cf9ae6 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 14:43:55 -0800 Subject: [PATCH 097/251] import context --- builder/oracle/classic/step_add_keys.go | 1 + builder/oracle/classic/step_create_instance.go | 1 + builder/oracle/classic/step_create_ip_reservation.go | 1 + builder/oracle/classic/step_security.go | 1 + builder/oracle/classic/step_snapshot.go | 1 + 5 files changed, 5 insertions(+) diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index accb613de..4e9c46d61 100644 --- a/builder/oracle/classic/step_add_keys.go +++ b/builder/oracle/classic/step_add_keys.go @@ -1,6 +1,7 @@ package classic import ( + "context" "fmt" "strings" diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 65b7d8736..a0183832d 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -1,6 +1,7 @@ package classic import ( + "context" "fmt" "log" diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index d57aa41ea..034cf5925 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -1,6 +1,7 @@ package classic import ( + "context" "fmt" "log" diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index c3184f36d..5cfc0e0d5 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -1,6 +1,7 @@ package classic import ( + "context" "fmt" 
"log" "strings" diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 5fa838e0e..4d1c531a7 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -1,6 +1,7 @@ package classic import ( + "context" "fmt" "github.com/hashicorp/go-oracle-terraform/compute" From 4dc42942f5597a2155ac221b9c59fe7f53285ccd Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 14:45:09 -0800 Subject: [PATCH 098/251] fix multistep path --- builder/oracle/classic/builder.go | 2 +- builder/oracle/classic/step_add_keys.go | 2 +- builder/oracle/classic/step_create_instance.go | 2 +- builder/oracle/classic/step_create_ip_reservation.go | 2 +- builder/oracle/classic/step_security.go | 2 +- builder/oracle/classic/step_snapshot.go | 2 +- builder/oracle/common/ssh.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 7847ef179..fd084b366 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -10,8 +10,8 @@ import ( ocommon "github.com/hashicorp/packer/builder/oracle/common" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) // BuilderId uniquely identifies the builder diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index 4e9c46d61..e8013cf88 100644 --- a/builder/oracle/classic/step_add_keys.go +++ b/builder/oracle/classic/step_add_keys.go @@ -6,8 +6,8 @@ import ( "strings" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) type stepAddKeysToAPI struct{} diff --git a/builder/oracle/classic/step_create_instance.go 
b/builder/oracle/classic/step_create_instance.go index a0183832d..9438431bb 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -6,8 +6,8 @@ import ( "log" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) type stepCreateInstance struct{} diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index 034cf5925..f24de7766 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -6,8 +6,8 @@ import ( "log" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) type stepCreateIPReservation struct{} diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index 5cfc0e0d5..1e00b2c7b 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -7,8 +7,8 @@ import ( "strings" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) type stepSecurity struct{} diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 4d1c531a7..30a7d32c8 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" ) type stepSnapshot struct{} diff --git a/builder/oracle/common/ssh.go b/builder/oracle/common/ssh.go index aa2780128..8e448e592 100644 --- a/builder/oracle/common/ssh.go +++ 
b/builder/oracle/common/ssh.go @@ -4,7 +4,7 @@ import ( "fmt" packerssh "github.com/hashicorp/packer/communicator/ssh" - "github.com/mitchellh/multistep" + "github.com/hashicorp/packer/helper/multistep" "golang.org/x/crypto/ssh" ) From 7f21ca546ddc0198af14738c46ac7c2e95b92a43 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 14:47:30 -0800 Subject: [PATCH 099/251] we're not using go-getter --- vendor/vendor.json | 6 ------ 1 file changed, 6 deletions(-) diff --git a/vendor/vendor.json b/vendor/vendor.json index 256c2e1d2..fe36fe283 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -785,12 +785,6 @@ "path": "github.com/hashicorp/go-cleanhttp", "revision": "875fb671b3ddc66f8e2f0acc33829c8cb989a38d" }, - { - "checksumSHA1": "1hPCerVn1bWA+9vaAGqnIkRtSFc=", - "path": "github.com/hashicorp/go-getter", - "revision": "cb2c2774e9771bd9568d843be4e59298786550fd", - "revisionTime": "2017-12-20T21:10:09Z" - }, { "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", "path": "github.com/hashicorp/go-multierror", From 0fad49e897ea9446cacffb0d2c80a27ec8acd495 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 15:05:36 -0800 Subject: [PATCH 100/251] simplify --- builder/oracle/classic/step_create_instance.go | 1 - builder/oracle/classic/step_security.go | 2 -- 2 files changed, 3 deletions(-) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 9438431bb..2f8493fd1 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -78,5 +78,4 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { } // TODO wait for instance state to change to deleted? 
ui.Say("Terminated instance.") - return } diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index 1e00b2c7b..8b1c48a4c 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -92,6 +92,4 @@ func (s *stepSecurity) Cleanup(state multistep.StateBag) { ui.Say(fmt.Sprintf("Error deleting the packer-generated security list %s; "+ "please delete manually. (error : %s)", secListName, err.Error())) } - - return } From 77277ebc98d0337d4c9507d957eda925c0e9a20b Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 16:12:03 -0800 Subject: [PATCH 101/251] add logging behind "PACKER_OCI_CLASSIC_LOGGING" env var --- builder/oracle/classic/builder.go | 3 +++ builder/oracle/classic/log.go | 14 ++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 builder/oracle/classic/log.go diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index fd084b366..7c682b875 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -3,6 +3,7 @@ package classic import ( "fmt" "log" + "os" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" @@ -35,6 +36,7 @@ func (b *Builder) Prepare(rawConfig ...interface{}) ([]string, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + loggingEnabled := os.Getenv("PACKER_OCI_CLASSIC_LOGGING") != "" httpClient := cleanhttp.DefaultClient() config := &opc.Config{ Username: opc.String(b.config.Username), @@ -42,6 +44,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe IdentityDomain: opc.String(b.config.IdentityDomain), APIEndpoint: b.config.apiEndpointURL, LogLevel: opc.LogDebug, + Logger: &Logger{loggingEnabled}, // Logger: # Leave blank to use the default logger, or provide your own HTTPClient: httpClient, } diff --git a/builder/oracle/classic/log.go 
b/builder/oracle/classic/log.go new file mode 100644 index 000000000..7389dc057 --- /dev/null +++ b/builder/oracle/classic/log.go @@ -0,0 +1,14 @@ +package classic + +import "log" + +type Logger struct { + Enabled bool +} + +func (l *Logger) Log(input ...interface{}) { + if !l.Enabled { + return + } + log.Println(input...) +} From dbf5d52c43bfe83bbd3a6fc05b4e41032d407cf1 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 16:20:14 -0800 Subject: [PATCH 102/251] update mapstructure library --- .../mitchellh/mapstructure/README.md | 2 +- .../mitchellh/mapstructure/decode_hooks.go | 19 +- .../mitchellh/mapstructure/mapstructure.go | 266 ++++++++++++++---- vendor/vendor.json | 5 +- 4 files changed, 231 insertions(+), 61 deletions(-) diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md index 659d6885f..7ecc785e4 100644 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -1,4 +1,4 @@ -# mapstructure +# mapstructure [![Godoc](https://godoc.org/github.com/mitchell/mapstructure?status.svg)](https://godoc.org/github.com/mitchell/mapstructure) mapstructure is a Go library for decoding generic map values to structures and vice versa, while providing helpful error handling. 
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index aa91f76ce..afcfd5eed 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -38,12 +38,6 @@ func DecodeHookExec( raw DecodeHookFunc, from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { - // Build our arguments that reflect expects - argVals := make([]reflect.Value, 3) - argVals[0] = reflect.ValueOf(from) - argVals[1] = reflect.ValueOf(to) - argVals[2] = reflect.ValueOf(data) - switch f := typedDecodeHook(raw).(type) { case DecodeHookFuncType: return f(from, to, data) @@ -72,7 +66,10 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } // Modify the from kind to be correct with the new data - f = reflect.ValueOf(data).Type() + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } } return data, nil @@ -118,6 +115,11 @@ func StringToTimeDurationHookFunc() DecodeHookFunc { } } +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
func WeaklyTypedHook( f reflect.Kind, t reflect.Kind, @@ -129,9 +131,8 @@ func WeaklyTypedHook( case reflect.Bool: if dataVal.Bool() { return "1", nil - } else { - return "0", nil } + return "0", nil case reflect.Float32: return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil case reflect.Int: diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 40be5116d..39ec1e943 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,5 +1,5 @@ -// The mapstructure package exposes functionality to convert an -// abitrary map[string]interface{} into a native Go structure. +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested @@ -8,6 +8,7 @@ package mapstructure import ( + "encoding/json" "errors" "fmt" "reflect" @@ -31,7 +32,12 @@ import ( // both. type DecodeHookFunc interface{} +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) // DecoderConfig is the configuration that is used to create a new decoder @@ -67,6 +73,10 @@ type DecoderConfig struct { // FALSE, false, False. Anything else is an error) // - empty array = empty map and vice versa // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. 
For example: "4" can become []int{4} + // if the target type is an int slice. // WeaklyTypedInput bool @@ -200,7 +210,7 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error d.config.DecodeHook, dataVal.Type(), val.Type(), data) if err != nil { - return err + return fmt.Errorf("error decoding '%s': %s", name, err) } } @@ -227,6 +237,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error err = d.decodePtr(name, data, val) case reflect.Slice: err = d.decodeSlice(name, data, val) + case reflect.Array: + err = d.decodeArray(name, data, val) + case reflect.Func: + err = d.decodeFunc(name, data, val) default: // If we reached this point then we weren't able to decode it return fmt.Errorf("%s: unsupported type: %s", name, dataKind) @@ -245,6 +259,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + dataValType := dataVal.Type() if !dataValType.AssignableTo(val.Type()) { return fmt.Errorf( @@ -276,12 +294,22 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: dataType := dataVal.Type() elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for 
i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) default: converted = false } @@ -301,6 +329,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -322,6 +351,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er } else { return fmt.Errorf("cannot parse '%s' as int: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -408,6 +445,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -415,7 +453,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) case dataKind == reflect.Uint: val.SetFloat(float64(dataVal.Uint())) case dataKind == reflect.Float32: - val.SetFloat(float64(dataVal.Float())) + val.SetFloat(dataVal.Float()) case dataKind == reflect.Bool && d.config.WeaklyTypedInput: if dataVal.Bool() { val.SetFloat(1) @@ -429,6 +467,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) } else { return fmt.Errorf("cannot parse '%s' as float: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() 
+ if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -456,15 +502,30 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er // Check input type dataVal := reflect.Indirect(reflect.ValueOf(data)) if dataVal.Kind() != reflect.Map { - // Accept empty array/slice instead of an empty map in weakly typed mode - if d.config.WeaklyTypedInput && - (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && - dataVal.Len() == 0 { - val.Set(valMap) - return nil - } else { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + // In weak mode, we accept a slice of maps as an input... + if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) } // Accumulate errors @@ -507,7 +568,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er // into that. Then set the value of the pointer to this type. 
valType := val.Type() valElemType := valType.Elem() - realVal := reflect.New(valElemType) + + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { return err } @@ -516,6 +582,19 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er return nil } +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataValKind := dataVal.Kind() @@ -523,26 +602,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. 
a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - } - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } // Accumulate any errors errors := make([]string, 0) for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } currentField := valSlice.Index(i) fieldName := fmt.Sprintf("%s[%d]", name, i) @@ -562,6 +659,73 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. 
+ return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) @@ -601,23 +765,20 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Compile the list of all the fields that we're going to be decoding // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - } + fieldKind := fieldType.Type.Kind() // If "squash" is specified in the tag, we squash the field down. squash := false @@ -630,19 +791,26 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } if squash { - structs = append(structs, val.FieldByName(fieldType.Name)) + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } continue } // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) + fields = append(fields, field{fieldType, structVal.Field(i)}) } } - for fieldType, field := range fields { - fieldName := fieldType.Name + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name - tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue := field.Tag.Get(d.config.TagName) tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue @@ -653,7 +821,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) if !rawMapVal.IsValid() { // Do a slower search by iterating over each key and // doing case-insensitive search. 
- for dataValKey, _ := range dataValKeys { + for dataValKey := range dataValKeys { mK, ok := dataValKey.Interface().(string) if !ok { // Not a string key @@ -677,14 +845,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Delete the key we're using from the unused map so we stop tracking delete(dataValKeysUnused, rawMapKey.Interface()) - if !field.IsValid() { + if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. - if !field.CanSet() { + if !fieldValue.CanSet() { continue } @@ -694,14 +862,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) fieldName = fmt.Sprintf("%s.%s", name, fieldName) } - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { errors = appendErrors(errors, err) } } if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { keys = append(keys, rawKey.(string)) } sort.Strings(keys) @@ -716,7 +884,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Add the unused keys to the list of unused keys if we're tracking metadata if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { key = fmt.Sprintf("%s.%s", name, key) diff --git a/vendor/vendor.json b/vendor/vendor.json index fe36fe283..6172b8c33 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -981,9 +981,10 @@ "revision": "87b45ffd0e9581375c491fef3d32130bb15c5bd7" }, { - "checksumSHA1": "4Js6Jlu93Wa0o6Kjt393L9Z7diE=", + "checksumSHA1": "1JtAhgmRN0x794LRNhs0DJ5t8io=", "path": "github.com/mitchellh/mapstructure", - "revision": 
"281073eb9eb092240d33ef253c404f1cca550309" + "revision": "b4575eea38cca1123ec2dc90c26529b5c5acfcff", + "revisionTime": "2018-01-11T00:07:20Z" }, { "checksumSHA1": "m2L8ohfZiFRsMW3iynaH/TWgnSY=", From 4622bb4585994ae4a3bfbefc69ebfb0a8b929c06 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 25 Jan 2018 16:29:24 -0800 Subject: [PATCH 103/251] return no artifact if no snapshot was created --- builder/oracle/classic/builder.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 7c682b875..9264e6466 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -92,6 +92,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, rawErr.(error) } + // If there is no snapshot, then just return + if _, ok := state.GetOk("snapshot"); !ok { + return nil, nil + } + // Build the artifact and return it artifact := &Artifact{ Snapshot: state.Get("snapshot").(*compute.Snapshot), From de2e5edf2ee57bd0dda934d23d9fc9d2aa96e822 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 08:43:51 -0800 Subject: [PATCH 104/251] remove errant change in amazon builder --- builder/amazon/common/step_key_pair.go | 3 +-- builder/oracle/classic/builder.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 55f5aace7..927101fcf 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "os" "runtime" - "strings" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/packer/helper/multistep" @@ -37,7 +36,7 @@ func (s *StepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep } state.Put("keyPair", s.KeyPairName) - state.Put("privateKey", strings.TrimSpace(string(privateKeyBytes))) + state.Put("privateKey", string(privateKeyBytes)) return 
multistep.ActionContinue } diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 9264e6466..8537289cf 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -35,8 +35,7 @@ func (b *Builder) Prepare(rawConfig ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - - loggingEnabled := os.Getenv("PACKER_OCI_CLASSIC_LOGGING")) != "" + loggingEnabled := os.Getenv("PACKER_OCI_CLASSIC_LOGGING") != "" httpClient := cleanhttp.DefaultClient() config := &opc.Config{ Username: opc.String(b.config.Username), From 25bc1da8fee6f2314777bd47346e7151e7c6d65f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 08:48:23 -0800 Subject: [PATCH 105/251] remove unsused access config --- builder/oracle/classic/config.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index eb3fd6876..409118dd5 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -15,8 +15,6 @@ type Config struct { common.PackerConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` - Access *AccessConfig - // Access config overrides Username string `mapstructure:"username"` Password string `mapstructure:"password"` From b6d21ecd63fc17650d32efadeaa2ffc85b1793f6 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 08:53:24 -0800 Subject: [PATCH 106/251] validate that required fields are present --- builder/oracle/classic/config.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 409118dd5..178bb2d4b 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -56,7 +56,26 @@ func NewConfig(raws ...interface{}) (*Config, error) { c.SSHSourceList = "seciplist:/oracle/public/public-internet" } + // 
Validate that all required fields are present var errs *packer.MultiError + required := map[string]string{ + "username": c.Username, + "password": c.Password, + "api_endpoint": c.APIEndpoint, + "identity_domain": c.IdentityDomain, + "image_list": c.ImageList, + "shape": c.Shape, + } + for k, v := range required { + if v == "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("You must specify a %s.", k)) + } + } + + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) + } + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } From ff9fef5ed2db3b2be53c4074ed08393d24895491 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 09:51:16 -0800 Subject: [PATCH 107/251] switch to using a UUID for packer-generated keys, and clean them up at end of build --- builder/oracle/classic/step_add_keys.go | 40 +++++++++++++++---------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index e8013cf88..8a6025977 100644 --- a/builder/oracle/classic/step_add_keys.go +++ b/builder/oracle/classic/step_add_keys.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/hashicorp/go-oracle-terraform/compute" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) @@ -23,7 +24,13 @@ func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) mult sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) // form API call to add key to compute cloud - sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key", config.IdentityDomain, config.Username) + uuid_string, err := uuid.GenerateUUID() + if err != nil { + ui.Error(fmt.Sprintf("Error creating unique SSH key name: %s", err.Error())) + return multistep.ActionHalt + } + sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key_%s", + config.IdentityDomain, 
config.Username, uuid_string) sshKeysClient := client.SSHKeys() sshKeysInput := compute.CreateSSHKeyInput{ @@ -35,25 +42,26 @@ func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) mult // Load the packer-generated SSH key into the Oracle Compute cloud. keyInfo, err := sshKeysClient.CreateSSHKey(&sshKeysInput) if err != nil { - // Key already exists; update key instead of creating it - if strings.Contains(err.Error(), "packer_generated_key already exists") { - updateKeysInput := compute.UpdateSSHKeyInput{ - Name: sshKeyName, - Key: sshPublicKey, - Enabled: true, - } - keyInfo, err = sshKeysClient.UpdateSSHKey(&updateKeysInput) - } else { - err = fmt.Errorf("Problem adding Public SSH key through Oracle's API: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt - } + err = fmt.Errorf("Problem adding Public SSH key through Oracle's API: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt } state.Put("key_name", keyInfo.Name) return multistep.ActionContinue } func (s *stepAddKeysToAPI) Cleanup(state multistep.StateBag) { - // Nothing to do + // Delete the keys we created during this run + keyName := state.Get("key_name").(string) + ui := state.Get("ui").(packer.Ui) + ui.Say("Deleting SSH keys...") + deleteInput := compute.DeleteSSHKeyInput{Name: keyName} + client := state.Get("client").(*compute.ComputeClient) + deleteClient := client.SSHKeys() + err := deleteClient.DeleteSSHKey(&deleteInput) + if err != nil { + ui.Error(fmt.Sprintf("Error deleting SSH keys: %s", err.Error())) + } + return } From 0e5be59947ba353a09cd0c2a24e057b459937a51 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 09:55:31 -0800 Subject: [PATCH 108/251] wrap error message for clarity --- builder/oracle/classic/step_create_ip_reservation.go | 2 -- builder/oracle/classic/step_security.go | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git 
a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index f24de7766..84154631b 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -3,7 +3,6 @@ package classic import ( "context" "fmt" - "log" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/helper/multistep" @@ -33,7 +32,6 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa return multistep.ActionHalt } state.Put("instance_ip", ipRes.IP) - log.Printf("debug: ipRes is %#v", ipRes) return multistep.ActionContinue } diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index 8b1c48a4c..dae341ba6 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -54,7 +54,7 @@ func (s *stepSecurity) Run(_ context.Context, state multistep.StateBag) multiste config.IdentityDomain, config.Username, config.ImageName) _, err = secRulesClient.CreateSecRule(&secRulesInput) if err != nil { - log.Printf(err.Error()) + log.Printf("Error creating security rule to allow SSH: %s", err.Error()) if !strings.Contains(err.Error(), "already exists") { err = fmt.Errorf("Error creating security rule to"+ " allow Packer to connect to Oracle instance via SSH: %s", err) From fad4d5c27248ea350d4321e80462f4739057cf68 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 26 Jan 2018 12:40:34 -0800 Subject: [PATCH 109/251] update tests for mapstructure behavior changes --- builder/amazon/chroot/builder_test.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/builder/amazon/chroot/builder_test.go b/builder/amazon/chroot/builder_test.go index c13658286..c0e52a2f6 100644 --- a/builder/amazon/chroot/builder_test.go +++ b/builder/amazon/chroot/builder_test.go @@ -71,11 +71,16 @@ func TestBuilderPrepare_ChrootMounts(t *testing.T) { if err != nil { 
t.Errorf("err: %s", err) } +} + +func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) { + b := &Builder{} + config := testConfig() config["chroot_mounts"] = [][]string{ {"bad"}, } - warnings, err = b.Prepare(config) + warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) } @@ -135,9 +140,14 @@ func TestBuilderPrepare_CopyFiles(t *testing.T) { if len(b.config.CopyFiles) != 1 && b.config.CopyFiles[0] != "/etc/resolv.conf" { t.Errorf("Was expecting default value for copy_files.") } +} + +func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) { + b := &Builder{} + config := testConfig() config["copy_files"] = []string{} - warnings, err = b.Prepare(config) + warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) } From 18ffde4ecfa39402af0c9ddef12138222e51c8e4 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 26 Jan 2018 12:59:46 -0800 Subject: [PATCH 110/251] remove unused file --- builder/oracle/classic/access_config.go | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 builder/oracle/classic/access_config.go diff --git a/builder/oracle/classic/access_config.go b/builder/oracle/classic/access_config.go deleted file mode 100644 index 99483ceec..000000000 --- a/builder/oracle/classic/access_config.go +++ /dev/null @@ -1,4 +0,0 @@ -package classic - -type AccessConfig struct { -} From 71acccc1ed4afa077d0a1d65c8efba628f7077f2 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 26 Jan 2018 13:12:35 -0800 Subject: [PATCH 111/251] add UI output with resource names --- builder/oracle/classic/step_add_keys.go | 3 ++- builder/oracle/classic/step_create_instance.go | 4 ++-- builder/oracle/classic/step_create_ip_reservation.go | 9 +++++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index 8a6025977..b8331eb94 100644 --- a/builder/oracle/classic/step_add_keys.go +++ 
b/builder/oracle/classic/step_add_keys.go @@ -16,7 +16,6 @@ type stepAddKeysToAPI struct{} func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { // get variables from state ui := state.Get("ui").(packer.Ui) - ui.Say("Adding SSH keys to API...") config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) @@ -32,6 +31,8 @@ func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) mult sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key_%s", config.IdentityDomain, config.Username, uuid_string) + ui.Say(fmt.Sprintf("Creating temporary key: %s", sshKeyName)) + sshKeysClient := client.SSHKeys() sshKeysInput := compute.CreateSSHKeyInput{ Name: sshKeyName, diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 2f8493fd1..fd70c6afa 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -16,11 +16,11 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu // get variables from state ui := state.Get("ui").(packer.Ui) ui.Say("Creating Instance...") + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) keyName := state.Get("key_name").(string) - - ipAddName := fmt.Sprintf("ipres_%s", config.ImageName) + ipAddName := state.Get("ipres_name").(string) secListName := state.Get("security_list").(string) netInfo := compute.NetworkingInfo{ diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index 84154631b..8e23be785 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -13,15 +13,19 @@ type stepCreateIPReservation struct{} func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := 
state.Get("ui").(packer.Ui) - ui.Say("Creating IP reservation...") + config := state.Get("config").(*Config) client := state.Get("client").(*compute.ComputeClient) iprClient := client.IPReservations() // TODO: add optional Name and Tags + + ipresName := fmt.Sprintf("ipres_%s", config.ImageName) + ui.Say(fmt.Sprintf("Creating IP reservation: %s", ipresName)) + IPInput := &compute.CreateIPReservationInput{ ParentPool: compute.PublicReservationPool, Permanent: true, - Name: fmt.Sprintf("ipres_%s", config.ImageName), + Name: ipresName, } ipRes, err := iprClient.CreateIPReservation(IPInput) @@ -32,6 +36,7 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa return multistep.ActionHalt } state.Put("instance_ip", ipRes.IP) + state.Put("ipres_name", ipresName) return multistep.ActionContinue } From c6b43ce6e9f645258f6898e0d4bf1646882865a0 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 26 Jan 2018 13:13:13 -0800 Subject: [PATCH 112/251] remove errouneous double prep --- builder/oracle/classic/config.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 178bb2d4b..540f24503 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -76,9 +76,5 @@ func NewConfig(raws ...interface{}) (*Config, error) { errs = packer.MultiErrorAppend(errs, es...) } - if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { - errs = packer.MultiErrorAppend(errs, es...) - } - return c, nil } From 9edd98f7b0fda9d791e9944c6a06dd0a896f70e7 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 26 Jan 2018 13:43:19 -0800 Subject: [PATCH 113/251] Use more uuids and make messaging consistent. 
--- builder/oracle/classic/step_add_keys.go | 9 ++------- builder/oracle/classic/step_create_instance.go | 4 ++-- builder/oracle/classic/step_create_ip_reservation.go | 5 +++-- builder/oracle/classic/step_security.go | 2 +- builder/oracle/classic/step_snapshot.go | 2 +- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/builder/oracle/classic/step_add_keys.go b/builder/oracle/classic/step_add_keys.go index b8331eb94..544c35bf2 100644 --- a/builder/oracle/classic/step_add_keys.go +++ b/builder/oracle/classic/step_add_keys.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/hashicorp/go-oracle-terraform/compute" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/packer/common/uuid" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) @@ -23,13 +23,8 @@ func (s *stepAddKeysToAPI) Run(_ context.Context, state multistep.StateBag) mult sshPublicKey := strings.TrimSpace(state.Get("publicKey").(string)) // form API call to add key to compute cloud - uuid_string, err := uuid.GenerateUUID() - if err != nil { - ui.Error(fmt.Sprintf("Error creating unique SSH key name: %s", err.Error())) - return multistep.ActionHalt - } sshKeyName := fmt.Sprintf("/Compute-%s/%s/packer_generated_key_%s", - config.IdentityDomain, config.Username, uuid_string) + config.IdentityDomain, config.Username, uuid.TimeOrderedUUID()) ui.Say(fmt.Sprintf("Creating temporary key: %s", sshKeyName)) diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index fd70c6afa..ea840cb22 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -49,7 +49,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu } state.Put("instance_id", instanceInfo.ID) - ui.Say(fmt.Sprintf("Created instance (%s).", instanceInfo.ID)) + ui.Message(fmt.Sprintf("Created instance: %s.", instanceInfo.ID)) return multistep.ActionContinue } @@ -60,7 
+60,7 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { config := state.Get("config").(*Config) imID := state.Get("instance_id").(string) - ui.Say(fmt.Sprintf("Terminating instance (%s)...", imID)) + ui.Say("Terminating source instance...") instanceClient := client.Instances() input := &compute.DeleteInstanceInput{ diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index 8e23be785..cee4a62e8 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/common/uuid" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) @@ -19,8 +20,8 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa iprClient := client.IPReservations() // TODO: add optional Name and Tags - ipresName := fmt.Sprintf("ipres_%s", config.ImageName) - ui.Say(fmt.Sprintf("Creating IP reservation: %s", ipresName)) + ipresName := fmt.Sprintf("ipres_%s_%s", config.ImageName, uuid.TimeOrderedUUID()) + ui.Say(fmt.Sprintf("Creating temporary IP reservation: %s", ipresName)) IPInput := &compute.CreateIPReservationInput{ ParentPool: compute.PublicReservationPool, diff --git a/builder/oracle/classic/step_security.go b/builder/oracle/classic/step_security.go index dae341ba6..22d74de08 100644 --- a/builder/oracle/classic/step_security.go +++ b/builder/oracle/classic/step_security.go @@ -71,7 +71,7 @@ func (s *stepSecurity) Run(_ context.Context, state multistep.StateBag) multiste func (s *stepSecurity) Cleanup(state multistep.StateBag) { client := state.Get("client").(*compute.ComputeClient) ui := state.Get("ui").(packer.Ui) - ui.Say("Deleting the packer-generated security rules and lists...") + ui.Say("Deleting temporary rules and lists...") // delete security rules that Packer generated 
secRuleName := state.Get("security_rule_name").(string) diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 30a7d32c8..602ebd1ad 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -37,7 +37,7 @@ func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multiste } state.Put("snapshot", snap) - ui.Say(fmt.Sprintf("Created snapshot (%s).", snap.Name)) + ui.Message(fmt.Sprintf("Created snapshot: %s.", snap.Name)) return multistep.ActionContinue } From 98857c42cfe1c0ba7484804fdd4af543a3a6a7ec Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 14:27:18 -0800 Subject: [PATCH 114/251] add tests; fix a couple issues caught by said tests --- builder/oracle/classic/config.go | 8 ++++ builder/oracle/classic/config_test.go | 59 +++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 builder/oracle/classic/config_test.go diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 540f24503..695307338 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -56,6 +56,10 @@ func NewConfig(raws ...interface{}) (*Config, error) { c.SSHSourceList = "seciplist:/oracle/public/public-internet" } + if c.Comm.SSHUsername == "" { + c.Comm.SSHUsername = "opc" + } + // Validate that all required fields are present var errs *packer.MultiError required := map[string]string{ @@ -76,5 +80,9 @@ func NewConfig(raws ...interface{}) (*Config, error) { errs = packer.MultiErrorAppend(errs, es...) 
} + if errs != nil && len(errs.Errors) > 0 { + return nil, errs + } + return c, nil } diff --git a/builder/oracle/classic/config_test.go b/builder/oracle/classic/config_test.go new file mode 100644 index 000000000..195e104a8 --- /dev/null +++ b/builder/oracle/classic/config_test.go @@ -0,0 +1,59 @@ +package classic + +import ( + "testing" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "identity_domain": "abc12345", + "username": "test@hashicorp.com", + "password": "testpassword123", + "api_endpoint": "https://api-test.compute.test.oraclecloud.com/", + "image_list": "/oracle/public/myimage", + "shape": "oc3", + "image_name": "TestImageName", + "ssh_username": "opc", + } +} + +func TestConfigAutoFillsSourceList(t *testing.T) { + tc := testConfig() + conf, err := NewConfig(tc) + if err != nil { + t.Fatalf("Should not have error: %s", err.Error()) + } + if conf.SSHSourceList != "seciplist:/oracle/public/public-internet" { + t.Fatalf("conf.SSHSourceList should have been "+ + "\"seciplist:/oracle/public/public-internet\" but is \"%s\"", + conf.SSHSourceList) + } +} + +func TestConfigValidationCatchesMissing(t *testing.T) { + required := []string{ + "username", + "password", + "api_endpoint", + "identity_domain", + "image_list", + "shape", + } + for _, key := range required { + tc := testConfig() + delete(tc, key) + _, err := NewConfig(tc) + if err == nil { + t.Fatalf("Test should have failed when config lacked %s!", key) + } + } +} + +func TestValidationsIgnoresOptional(t *testing.T) { + tc := testConfig() + delete(tc, "ssh_username") + _, err := NewConfig(tc) + if err != nil { + t.Fatalf("Test shouldn't care if ssh_username is missing: err: %#v", err.Error()) + } +} From 565b660b19d03c61b2dcbad244cbf323f27b8855 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 14:28:27 -0800 Subject: [PATCH 115/251] comments --- builder/oracle/classic/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 695307338..7b2f7dfb2 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -51,11 +51,11 @@ func NewConfig(raws ...interface{}) (*Config, error) { if err != nil { return nil, fmt.Errorf("Error parsing API Endpoint: %s", err) } - // set default source list + // Set default source list if c.SSHSourceList == "" { c.SSHSourceList = "seciplist:/oracle/public/public-internet" } - + // Use default oracle username with sudo privileges if c.Comm.SSHUsername == "" { c.Comm.SSHUsername = "opc" } From 3ee1aa3ed62ce8082a5e2b92aa7258ab5d21ef35 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 15:18:33 -0800 Subject: [PATCH 116/251] clean up ip reservations --- builder/oracle/classic/config_test.go | 2 +- .../oracle/classic/step_create_ip_reservation.go | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/config_test.go b/builder/oracle/classic/config_test.go index 195e104a8..81b8f0344 100644 --- a/builder/oracle/classic/config_test.go +++ b/builder/oracle/classic/config_test.go @@ -54,6 +54,6 @@ func TestValidationsIgnoresOptional(t *testing.T) { delete(tc, "ssh_username") _, err := NewConfig(tc) if err != nil { - t.Fatalf("Test shouldn't care if ssh_username is missing: err: %#v", err.Error()) + t.Fatalf("Shouldn't care if ssh_username is missing: err: %#v", err.Error()) } } diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index cee4a62e8..ca324b7e0 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -42,5 +42,16 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa } func (s *stepCreateIPReservation) Cleanup(state multistep.StateBag) { - // TODO: delete ip reservation + ui := state.Get("ui").(packer.Ui) + ui.Message("Cleaning 
up IP reservations...") + client := state.Get("client").(*compute.ComputeClient) + + ipResName := state.Get("ipres_name").(string) + input := compute.DeleteIPReservationInput{Name: ipResName} + ipClient := client.IPReservations() + err := ipClient.DeleteIPReservation(&input) + if err != nil { + fmt.Printf("error deleting IP reservation: %s", err.Error()) + } + } From 56c6fed42a3b1ae5683a1d0c2e113ba9d1c4f037 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 26 Jan 2018 15:20:12 -0800 Subject: [PATCH 117/251] ui.say vs ui.message --- builder/oracle/classic/step_create_ip_reservation.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/step_create_ip_reservation.go b/builder/oracle/classic/step_create_ip_reservation.go index ca324b7e0..74466a39f 100644 --- a/builder/oracle/classic/step_create_ip_reservation.go +++ b/builder/oracle/classic/step_create_ip_reservation.go @@ -21,7 +21,7 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa // TODO: add optional Name and Tags ipresName := fmt.Sprintf("ipres_%s_%s", config.ImageName, uuid.TimeOrderedUUID()) - ui.Say(fmt.Sprintf("Creating temporary IP reservation: %s", ipresName)) + ui.Message(fmt.Sprintf("Creating temporary IP reservation: %s", ipresName)) IPInput := &compute.CreateIPReservationInput{ ParentPool: compute.PublicReservationPool, @@ -43,7 +43,7 @@ func (s *stepCreateIPReservation) Run(_ context.Context, state multistep.StateBa func (s *stepCreateIPReservation) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) - ui.Message("Cleaning up IP reservations...") + ui.Say("Cleaning up IP reservations...") client := state.Get("client").(*compute.ComputeClient) ipResName := state.Get("ipres_name").(string) From 0868c21d13da62ddf32ac47342360f530f41793b Mon Sep 17 00:00:00 2001 From: SharePointOscar Date: Sat, 27 Jan 2018 12:49:41 -0800 Subject: [PATCH 118/251] updated to use new Azure CLI 'az' and respective commands --- 
contrib/azure-setup.sh | 66 +++++++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 27 deletions(-) diff --git a/contrib/azure-setup.sh b/contrib/azure-setup.sh index b5f6de220..4c4d6ee6b 100755 --- a/contrib/azure-setup.sh +++ b/contrib/azure-setup.sh @@ -15,9 +15,9 @@ azureversion= create_sleep=10 showhelp() { - echo "azure-setup" + echo "az-setup" echo "" - echo " azure-setup helps you generate packer credentials for Azure" + echo " az-setup helps you generate packer credentials for az" echo "" echo " The script creates a resource group, storage account, application" echo " (client), service principal, and permissions and displays a snippet" @@ -25,37 +25,37 @@ showhelp() { echo "" echo " For simplicity we make a lot of assumptions and choose reasonable" echo " defaults. If you want more control over what happens, please use" - echo " the azure-cli directly." + echo " the az-cli directly." echo "" - echo " Note that you must already have an Azure account, username," + echo " Note that you must already have an az account, username," echo " password, and subscription. You can create those here:" echo "" echo " - https://account.windowsazure.com/" echo "" echo "REQUIREMENTS" echo "" - echo " - azure-cli" + echo " - az-cli" echo " - jq" echo "" echo " Use the requirements command (below) for more info." echo "" echo "USAGE" echo "" - echo " ./azure-setup.sh requirements" - echo " ./azure-setup.sh setup" + echo " ./az-setup.sh requirements" + echo " ./az-setup.sh setup" echo "" } requirements() { found=0 - azureversion=$(azure -v) + azureversion=$(az -v) if [ $? -eq 0 ]; then found=$((found + 1)) - echo "Found azure-cli version: $azureversion" + echo "Found az-cli version: $azureversion" else - echo "azure-cli is missing. Please install azure-cli from" - echo "https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/" + echo "az cli is missing. 
Please install az cli from" + echo "https://az.microsoft.com/en-us/documentation/articles/xplat-cli-install/" fi jqversion=$(jq --version) @@ -73,19 +73,20 @@ requirements() { } askSubscription() { - azure account list + az account list echo "" echo "Please enter the Id of the account you wish to use. If you do not see" echo "a valid account in the list press Ctrl+C to abort and create one." echo "If you leave this blank we will use the Current account." echo -n "> " read azure_subscription_id + if [ "$azure_subscription_id" != "" ]; then - azure account set $azure_subscription_id + az account set --subscription $azure_subscription_id else - azure_subscription_id=$(azure account show --json | jq -r .[].id) + azure_subscription_id=$(az account list | jq -r .[].id) fi - azure_tenant_id=$(azure account show --json | jq -r .[].tenantId) + azure_tenant_id=$(az account list | jq -r '.[] | select(.tenantId) | .tenantId') echo "Using subscription_id: $azure_subscription_id" echo "Using tenant_id: $azure_tenant_id" } @@ -118,16 +119,16 @@ askSecret() { } askLocation() { - azure location list + az account list-locations echo "" - echo "Choose which region your resource group and storage account will be created." + echo "Choose which region your resource group and storage account will be created. example: westus" echo -n "> " read location } createResourceGroup() { echo "==> Creating resource group" - azure group create -n $meta_name -l $location + az group create -n $meta_name -l $location if [ $? -eq 0 ]; then azure_group_name=$meta_name else @@ -138,7 +139,7 @@ createResourceGroup() { createStorageAccount() { echo "==> Creating storage account" - azure storage account create -g $meta_name -l $location --sku-name LRS --kind Storage $meta_name + az storage account create --name $meta_name --resource-group $meta_name --location $location --kind Storage if [ $? 
-eq 0 ]; then azure_storage_name=$meta_name else @@ -149,7 +150,17 @@ createStorageAccount() { createApplication() { echo "==> Creating application" - azure_client_id=$(azure ad app create -n $meta_name -i http://$meta_name --home-page http://$meta_name -p $azure_client_secret --json | jq -r .appId) + echo "==> Does application exist?" + azure_client_id=$(az ad app list | jq -r '.[] | select(.displayName | contains("'$meta_name'")) ') + + if [ "$azure_client_id" != "" ]; then + echo "==> application already exist, grab appId" + azure_client_id=$(az ad app list | jq -r '.[] | select(.displayName | contains("spfarmpacker")) .appId') + else + echo "==> application does not exist" + azure_client_id=$(az ad app create --display-name $meta_name --identifier-uris http://$meta_name --homepage http://$meta_name --password $azure_client_secret | jq -r .appId) + fi + if [ $? -ne 0 ]; then echo "Error creating application: $meta_name @ http://$meta_name" return 1 @@ -158,7 +169,7 @@ createApplication() { createServicePrincipal() { echo "==> Creating service principal" - # Azure CLI 0.10.2 introduced a breaking change, where appId must be supplied with the -a switch + # az CLI 0.10.2 introduced a breaking change, where appId must be supplied with the -a switch # prior version accepted appId as the only parameter without a switch newer_syntax=false IFS='.' read -ra azureversionsemver <<< "$azureversion" @@ -167,9 +178,11 @@ createServicePrincipal() { fi if [ "${newer_syntax}" = true ]; then - azure_object_id=$(azure ad sp create -a $azure_client_id --json | jq -r .objectId) + azure_object_id=$(az ad sp create --id $azure_client_id | jq -r .objectId) + echo $azure_object_id "was selected." else - azure_object_id=$(azure ad sp create $azure_client_id --json | jq -r .objectId) + azure_object_id=$(az ad sp create --id $azure_client_id | jq -r .objectId) + echo $azure_object_id "was selected." fi if [ $? 
-ne 0 ]; then @@ -180,10 +193,10 @@ createServicePrincipal() { createPermissions() { echo "==> Creating permissions" - azure role assignment create --objectId $azure_object_id -o "Owner" -c /subscriptions/$azure_subscription_id + az role assignment create --assignee $azure_object_id --role "Owner" --scope /subscriptions/$azure_subscription_id # We want to use this more conservative scope but it does not work with the # current implementation which uses temporary resource groups - # azure role assignment create --spn http://$meta_name -g $azure_group_name -o "API Management Service Contributor" + # az role assignment create --spn http://$meta_name -g $azure_group_name -o "API Management Service Contributor" if [ $? -ne 0 ]; then echo "Error creating permissions for: http://$meta_name" return 1 @@ -234,8 +247,7 @@ retryable() { setup() { requirements - azure config mode arm - azure login + #az login askSubscription askName From 6770bb8091241e7ae54c43f74ff7fdc922ebb273 Mon Sep 17 00:00:00 2001 From: SharePointOscar Date: Sat, 27 Jan 2018 12:55:16 -0800 Subject: [PATCH 119/251] changed verbiage on cli name --- contrib/azure-setup.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/contrib/azure-setup.sh b/contrib/azure-setup.sh index 4c4d6ee6b..ec5f1b896 100755 --- a/contrib/azure-setup.sh +++ b/contrib/azure-setup.sh @@ -15,9 +15,9 @@ azureversion= create_sleep=10 showhelp() { - echo "az-setup" + echo "azure-setup" echo "" - echo " az-setup helps you generate packer credentials for az" + echo " azure-setup helps you generate packer credentials for azure" echo "" echo " The script creates a resource group, storage account, application" echo " (client), service principal, and permissions and displays a snippet" @@ -25,7 +25,7 @@ showhelp() { echo "" echo " For simplicity we make a lot of assumptions and choose reasonable" echo " defaults. If you want more control over what happens, please use" - echo " the az-cli directly." 
+ echo " the azure cli directly." echo "" echo " Note that you must already have an az account, username," echo " password, and subscription. You can create those here:" @@ -34,15 +34,15 @@ showhelp() { echo "" echo "REQUIREMENTS" echo "" - echo " - az-cli" + echo " - azure-cli" echo " - jq" echo "" echo " Use the requirements command (below) for more info." echo "" echo "USAGE" echo "" - echo " ./az-setup.sh requirements" - echo " ./az-setup.sh setup" + echo " ./azure-setup.sh requirements" + echo " ./azure-setup.sh setup" echo "" } @@ -52,9 +52,9 @@ requirements() { azureversion=$(az -v) if [ $? -eq 0 ]; then found=$((found + 1)) - echo "Found az-cli version: $azureversion" + echo "Found azure-cli version: $azureversion" else - echo "az cli is missing. Please install az cli from" + echo "azure cli is missing. Please install azure cli from" echo "https://az.microsoft.com/en-us/documentation/articles/xplat-cli-install/" fi From 6327ab6f3d7fe0a335e41d501420c01febebc54c Mon Sep 17 00:00:00 2001 From: SharePointOscar Date: Sat, 27 Jan 2018 12:56:43 -0800 Subject: [PATCH 120/251] changed verbiage on cli name --- contrib/azure-setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/azure-setup.sh b/contrib/azure-setup.sh index ec5f1b896..7e6817e74 100755 --- a/contrib/azure-setup.sh +++ b/contrib/azure-setup.sh @@ -247,7 +247,7 @@ retryable() { setup() { requirements - #az login + az login askSubscription askName From 361a175fc9c8ff80016bae86c3a437554d1f797c Mon Sep 17 00:00:00 2001 From: Oscar Medina Date: Sun, 28 Jan 2018 07:49:58 -0800 Subject: [PATCH 121/251] Removed static value from code The parameter within the contains() should not be hard-coded, using $meta_name variable value. 
--- contrib/azure-setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/azure-setup.sh b/contrib/azure-setup.sh index 7e6817e74..f30da9693 100755 --- a/contrib/azure-setup.sh +++ b/contrib/azure-setup.sh @@ -155,7 +155,7 @@ createApplication() { if [ "$azure_client_id" != "" ]; then echo "==> application already exist, grab appId" - azure_client_id=$(az ad app list | jq -r '.[] | select(.displayName | contains("spfarmpacker")) .appId') + azure_client_id=$(az ad app list | jq -r '.[] | select(.displayName | contains("'$meta_name'")) .appId') else echo "==> application does not exist" azure_client_id=$(az ad app create --display-name $meta_name --identifier-uris http://$meta_name --homepage http://$meta_name --password $azure_client_secret | jq -r .appId) From 705459c26060e98691db345a83ec9dc5e7f03e4f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 29 Jan 2018 16:50:53 -0800 Subject: [PATCH 122/251] add snapshotted machine image to image lists, then delete the snapshot. 
--- builder/oracle/classic/builder.go | 1 + builder/oracle/classic/config.go | 22 +++--- .../oracle/classic/step_create_instance.go | 2 +- builder/oracle/classic/step_list_images.go | 70 +++++++++++++++++++ builder/oracle/classic/step_snapshot.go | 19 ++++- 5 files changed, 103 insertions(+), 11 deletions(-) create mode 100644 builder/oracle/classic/step_list_images.go diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 8537289cf..088ff4dbc 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -80,6 +80,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &common.StepProvision{}, &stepSnapshot{}, + &stepListImages{}, } // Run the steps diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 7b2f7dfb2..e0c82beb2 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -23,9 +23,13 @@ type Config struct { apiEndpointURL *url.URL // Image - ImageName string `mapstructure:"image_name"` - Shape string `mapstructure:"shape"` - ImageList string `mapstructure:"image_list"` + ImageName string `mapstructure:"image_name"` + Shape string `mapstructure:"shape"` + SourceImageList string `mapstructure:"source_image_list"` + DestImageList string `mapstructure:"dest_image_list"` + // Optional; if you don't enter anything, the image list description + // will read "Packer-built image list" + DestImageListDescription string `mapstructure:"dest_image_list_description` // Optional. Describes what computers are allowed to reach your instance // via SSH. This whitelist must contain the computer you're running Packer // from. 
It defaults to public-internet, meaning that you can SSH into your @@ -63,12 +67,12 @@ func NewConfig(raws ...interface{}) (*Config, error) { // Validate that all required fields are present var errs *packer.MultiError required := map[string]string{ - "username": c.Username, - "password": c.Password, - "api_endpoint": c.APIEndpoint, - "identity_domain": c.IdentityDomain, - "image_list": c.ImageList, - "shape": c.Shape, + "username": c.Username, + "password": c.Password, + "api_endpoint": c.APIEndpoint, + "identity_domain": c.IdentityDomain, + "source_image_list": c.SourceImageList, + "shape": c.Shape, } for k, v := range required { if v == "" { diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index ea840cb22..891fda33e 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -35,7 +35,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu input := &compute.CreateInstanceInput{ Name: config.ImageName, Shape: config.Shape, - ImageList: config.ImageList, + ImageList: config.SourceImageList, SSHKeys: []string{keyName}, Networking: map[string]compute.NetworkingInfo{"eth0": netInfo}, } diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go new file mode 100644 index 000000000..8d0c5ee12 --- /dev/null +++ b/builder/oracle/classic/step_list_images.go @@ -0,0 +1,70 @@ +package classic + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type stepListImages struct{} + +func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + // get variables from state + ui := state.Get("ui").(packer.Ui) + config := state.Get("config").(*Config) + client := state.Get("client").(*compute.ComputeClient) + ui.Say("Adding image to image 
list...") + + // TODO: Try to get image list + imageListClient := client.ImageList() + getInput := compute.GetImageListInput{ + Name: config.DestImageList, + } + imList, err := imageListClient.GetImageList(&getInput) + if err != nil { + ui.Say(fmt.Sprintf(err.Error())) + // If the list didn't exist, create it. + ui.Say(fmt.Sprintf("Creating image list: %s", config.DestImageList)) + + ilInput := compute.CreateImageListInput{ + Name: config.DestImageList, + Description: "Packer-built image list", + } + + // Load the packer-generated SSH key into the Oracle Compute cloud. + imList, err = imageListClient.CreateImageList(&ilInput) + if err != nil { + err = fmt.Errorf("Problem creating an image list through Oracle's API: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + ui.Message(fmt.Sprintf("Image list %s created!", imList.URI)) + } + + // Now create and image list entry for the image into that list. + snap := state.Get("snapshot").(*compute.Snapshot) + entriesClient := client.ImageListEntries() + entriesInput := compute.CreateImageListEntryInput{ + Name: config.DestImageList, + MachineImages: []string{fmt.Sprintf("Compute-%s/%s/%s", config.IdentityDomain, config.Username, snap.MachineImage)}, + Version: 1, + } + entryInfo, err := entriesClient.CreateImageListEntry(&entriesInput) + if err != nil { + err = fmt.Errorf("Problem creating an image list entry: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + ui.Message(fmt.Sprintf("created image list entry %s", entryInfo.Name)) + return multistep.ActionContinue +} + +func (s *stepListImages) Cleanup(state multistep.StateBag) { + // Nothing to do + return +} diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 602ebd1ad..b98530691 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -42,5 +42,22 @@ func (s *stepSnapshot) Run(_ 
context.Context, state multistep.StateBag) multiste } func (s *stepSnapshot) Cleanup(state multistep.StateBag) { - // Nothing to do + // Delete the snapshot + ui := state.Get("ui").(packer.Ui) + ui.Say("Creating Snapshot...") + client := state.Get("client").(*compute.ComputeClient) + snap := state.Get("snapshot").(*compute.Snapshot) + snapClient := client.Snapshots() + snapInput := compute.DeleteSnapshotInput{ + Snapshot: snap.Name, + MachineImage: snap.MachineImage, + } + machineClient := client.MachineImages() + err := snapClient.DeleteSnapshot(machineClient, &snapInput) + if err != nil { + err = fmt.Errorf("Problem deleting snapshot: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + } + return } From cab52872f499289d9c5883b322f9a77dbee0bb61 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 30 Jan 2018 22:00:37 -0800 Subject: [PATCH 123/251] add session level keep-alives for ssh communicator --- communicator/ssh/communicator.go | 18 ++++++++++++++++++ helper/communicator/config.go | 5 +++++ helper/communicator/step_connect_ssh.go | 1 + .../source/docs/templates/communicator.html.md | 4 ++++ 4 files changed, 28 insertions(+) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index c10e97dcc..eabf32aea 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -55,6 +55,10 @@ type Config struct { // UseSftp, if true, sftp will be used instead of scp for file transfers UseSftp bool + + // KeepAliveInterval sets how often we send a channel request to the + // server. A value < 0 disables. + KeepAliveInterval time.Duration } // Creates a new packer.Communicator implementation over SSH. 
This takes @@ -104,6 +108,20 @@ func (c *comm) Start(cmd *packer.RemoteCmd) (err error) { return } + go func() { + if c.config.KeepAliveInterval < 0 { + return + } + c := time.NewTicker(c.config.KeepAliveInterval) + defer c.Stop() + for range c.C { + _, err := session.SendRequest("keepalive@packer.io", true, nil) + if err != nil { + return + } + } + }() + // Start a goroutine to wait for the session to end and set the // exit boolean and status. go func() { diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 5ecbdfc65..8fe1032cb 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -37,6 +37,7 @@ type Config struct { SSHProxyPort int `mapstructure:"ssh_proxy_port"` SSHProxyUsername string `mapstructure:"ssh_proxy_username"` SSHProxyPassword string `mapstructure:"ssh_proxy_password"` + SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"` // WinRM WinRMUser string `mapstructure:"winrm_username"` @@ -131,6 +132,10 @@ func (c *Config) prepareSSH(ctx *interpolate.Context) []error { c.SSHTimeout = 5 * time.Minute } + if c.SSHKeepAliveInterval == 0 { + c.SSHKeepAliveInterval = 5 * time.Second + } + if c.SSHHandshakeAttempts == 0 { c.SSHHandshakeAttempts = 10 } diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index 680b88a02..192dba739 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -182,6 +182,7 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru Pty: s.Config.SSHPty, DisableAgentForwarding: s.Config.SSHDisableAgentForwarding, UseSftp: s.Config.SSHFileTransferMethod == "sftp", + KeepAliveInterval: s.Config.SSHKeepAliveInterval, } log.Println("[INFO] Attempting SSH connection...") diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 6bb5150b9..e8a5d56fc 100644 --- 
a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -91,6 +91,10 @@ The SSH communicator has the following options: - `ssh_host` (string) - The address to SSH to. This usually is automatically configured by the builder. +* `ssh_keep_alive_interval` (string) - How often to send "keep alive" + messages to the server. Set to a negative value (`-1`) to disable. Example value: + "10s". Defaults to "5s". + - `ssh_password` (string) - A plaintext password to use to authenticate with SSH. From 55977be8f37427b4ff1fb1ab3084e601f76ead92 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 30 Jan 2018 23:26:26 -0800 Subject: [PATCH 124/251] Update communicator.html.md --- website/source/docs/templates/communicator.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index e8a5d56fc..5fbb8a3d5 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -92,7 +92,7 @@ The SSH communicator has the following options: configured by the builder. * `ssh_keep_alive_interval` (string) - How often to send "keep alive" - messages to the server. Set to a negative value (`-1`) to disable. Example value: + messages to the server. Set to a negative value (`-1s`) to disable. Example value: "10s". Defaults to "5s". 
- `ssh_password` (string) - A plaintext password to use to authenticate From 2452f0788bc841aef284660ea917acc8d300bdaa Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 30 Jan 2018 23:30:25 -0800 Subject: [PATCH 125/251] remove obsolete ssh_wait_timeout from examples --- examples/alicloud/local/centos.json | 2 +- website/source/docs/builders/parallels-iso.html.md | 2 +- website/source/docs/builders/parallels-pvm.html.md | 2 +- website/source/docs/builders/qemu.html.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/alicloud/local/centos.json b/examples/alicloud/local/centos.json index 45c19b3dc..07b2f2578 100644 --- a/examples/alicloud/local/centos.json +++ b/examples/alicloud/local/centos.json @@ -37,7 +37,7 @@ "ssh_password": "vagrant", "ssh_port": 22, "ssh_username": "root", - "ssh_wait_timeout": "10000s", + "ssh_timeout": "10000s", "type": "qemu", "vm_name": "{{ user `template` }}.raw", "net_device": "virtio-net", diff --git a/website/source/docs/builders/parallels-iso.html.md b/website/source/docs/builders/parallels-iso.html.md index c9c23d919..e7f0dd4a4 100644 --- a/website/source/docs/builders/parallels-iso.html.md +++ b/website/source/docs/builders/parallels-iso.html.md @@ -37,7 +37,7 @@ self-install. Still, the example serves to show the basic configuration: "parallels_tools_flavor": "lin", "ssh_username": "packer", "ssh_password": "packer", - "ssh_wait_timeout": "30s", + "ssh_timeout": "30s", "shutdown_command": "echo 'packer' | sudo -S shutdown -P now" } ``` diff --git a/website/source/docs/builders/parallels-pvm.html.md b/website/source/docs/builders/parallels-pvm.html.md index ba3d9c1a9..59accdcd1 100644 --- a/website/source/docs/builders/parallels-pvm.html.md +++ b/website/source/docs/builders/parallels-pvm.html.md @@ -33,7 +33,7 @@ the settings here. 
"source_path": "source.pvm", "ssh_username": "packer", "ssh_password": "packer", - "ssh_wait_timeout": "30s", + "ssh_timeout": "30s", "shutdown_command": "echo 'packer' | sudo -S shutdown -P now" } ``` diff --git a/website/source/docs/builders/qemu.html.md b/website/source/docs/builders/qemu.html.md index f3de7cf9c..b8f0ab368 100644 --- a/website/source/docs/builders/qemu.html.md +++ b/website/source/docs/builders/qemu.html.md @@ -47,7 +47,7 @@ to files, URLS for ISOs and checksums. "ssh_username": "root", "ssh_password": "s0m3password", "ssh_port": 22, - "ssh_wait_timeout": "30s", + "ssh_timeout": "30s", "vm_name": "tdhtest", "net_device": "virtio-net", "disk_interface": "virtio", From 1f92aa2c0a8ded78488c33853477923023387bd4 Mon Sep 17 00:00:00 2001 From: Evan Machnic Date: Wed, 31 Jan 2018 10:49:04 -0600 Subject: [PATCH 126/251] Added Policyfile support to chef-client provisioner --- provisioner/chef-client/provisioner.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 7feab150c..51c315ca1 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -56,6 +56,8 @@ type Config struct { InstallCommand string `mapstructure:"install_command"` KnifeCommand string `mapstructure:"knife_command"` NodeName string `mapstructure:"node_name"` + PolicyGroup string `mapstructure:"policy_group"` + PolicyName string `mapstructure:"policy_name"` PreventSudo bool `mapstructure:"prevent_sudo"` RunList []string `mapstructure:"run_list"` ServerUrl string `mapstructure:"server_url"` @@ -82,6 +84,8 @@ type ConfigTemplate struct { ClientKey string EncryptedDataBagSecretPath string NodeName string + PolicyGroup string + PolicyName string ServerUrl string SslVerifyMode string TrustedCertsDir string @@ -270,6 +274,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { remoteValidationKeyPath, 
p.config.ValidationClientName, p.config.ChefEnvironment, + p.config.PolicyGroup, + p.config.PolicyName, p.config.SslVerifyMode, p.config.TrustedCertsDir) if err != nil { @@ -374,6 +380,8 @@ func (p *Provisioner) createConfig( ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, + PolicyGroup: policyGroup, + PolicyName: policyName, SslVerifyMode: sslVerifyMode, TrustedCertsDir: trustedCertsDir, EncryptedDataBagSecretPath: encryptedDataBagSecretPath, @@ -688,6 +696,12 @@ node_name "{{.NodeName}}" {{if ne .ChefEnvironment ""}} environment "{{.ChefEnvironment}}" {{end}} +{{if ne .PolicyGroup ""}} +policy_group "{{.PolicyGroup}}" +{{end}} +{{if ne .PolicyName ""}} +policy_name "{{.PolicyName}}" +{{end}} {{if ne .SslVerifyMode ""}} ssl_verify_mode :{{.SslVerifyMode}} {{end}} From fe90f797041b7fe555bdbccc63818288b6a080c5 Mon Sep 17 00:00:00 2001 From: Evan Machnic Date: Wed, 31 Jan 2018 10:54:40 -0600 Subject: [PATCH 127/251] Changed new code to use tabs instead of spaces --- provisioner/chef-client/provisioner.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 51c315ca1..950bf7ea5 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -56,8 +56,8 @@ type Config struct { InstallCommand string `mapstructure:"install_command"` KnifeCommand string `mapstructure:"knife_command"` NodeName string `mapstructure:"node_name"` - PolicyGroup string `mapstructure:"policy_group"` - PolicyName string `mapstructure:"policy_name"` + PolicyGroup string `mapstructure:"policy_group"` + PolicyName string `mapstructure:"policy_name"` PreventSudo bool `mapstructure:"prevent_sudo"` RunList []string `mapstructure:"run_list"` ServerUrl string `mapstructure:"server_url"` @@ -84,8 +84,8 @@ type ConfigTemplate struct { ClientKey string EncryptedDataBagSecretPath string NodeName 
string - PolicyGroup string - PolicyName string + PolicyGroup string + PolicyName string ServerUrl string SslVerifyMode string TrustedCertsDir string @@ -274,8 +274,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, - p.config.PolicyGroup, - p.config.PolicyName, + p.config.PolicyGroup, + p.config.PolicyName, p.config.SslVerifyMode, p.config.TrustedCertsDir) if err != nil { @@ -380,8 +380,8 @@ func (p *Provisioner) createConfig( ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, - PolicyGroup: policyGroup, - PolicyName: policyName, + PolicyGroup: policyGroup, + PolicyName: policyName, SslVerifyMode: sslVerifyMode, TrustedCertsDir: trustedCertsDir, EncryptedDataBagSecretPath: encryptedDataBagSecretPath, From 735424793398d8c865459207426613087be21a05 Mon Sep 17 00:00:00 2001 From: Evan Machnic Date: Wed, 31 Jan 2018 11:01:08 -0600 Subject: [PATCH 128/251] Added policyGroup and policyName to Packer Communicator --- provisioner/chef-client/provisioner.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 950bf7ea5..b82bb939b 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -350,6 +350,8 @@ func (p *Provisioner) createConfig( remoteKeyPath string, validationClientName string, chefEnvironment string, + policyGroup string, + policyName string, sslVerifyMode string, trustedCertsDir string) (string, error) { From 5cedfc4557acea8043578ff3d6c8fba97cdbc575 Mon Sep 17 00:00:00 2001 From: Evan Machnic Date: Wed, 31 Jan 2018 11:06:31 -0600 Subject: [PATCH 129/251] Fixed go format errors --- provisioner/chef-client/provisioner.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioner/chef-client/provisioner.go 
b/provisioner/chef-client/provisioner.go index b82bb939b..27d94c6c3 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -350,8 +350,8 @@ func (p *Provisioner) createConfig( remoteKeyPath string, validationClientName string, chefEnvironment string, - policyGroup string, - policyName string, + policyGroup string, + policyName string, sslVerifyMode string, trustedCertsDir string) (string, error) { From 871ead371a67446aa0b62d7e8f213cdedd1c927b Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 10:47:19 -0800 Subject: [PATCH 130/251] Clean up based on Oracle comments --- builder/oracle/classic/artifact.go | 20 +++++++------ builder/oracle/classic/builder.go | 6 ++-- builder/oracle/classic/config.go | 3 +- builder/oracle/classic/config_test.go | 20 +++++++------ .../oracle/classic/step_create_instance.go | 2 -- builder/oracle/classic/step_list_images.go | 30 ++++++++++++++++--- builder/oracle/classic/step_snapshot.go | 4 +-- .../go-oracle-terraform/compute/snapshots.go | 25 ++++++++++++++++ vendor/vendor.json | 2 +- .../docs/builders/oracle-classic.html.md | 10 +++++-- 10 files changed, 90 insertions(+), 32 deletions(-) diff --git a/builder/oracle/classic/artifact.go b/builder/oracle/classic/artifact.go index fb7861b04..82987be1a 100644 --- a/builder/oracle/classic/artifact.go +++ b/builder/oracle/classic/artifact.go @@ -6,10 +6,13 @@ import ( "github.com/hashicorp/go-oracle-terraform/compute" ) -// Artifact is an artifact implementation that contains a Snapshot. +// Artifact is an artifact implementation that contains Image List +// and Machine Image info. type Artifact struct { - Snapshot *compute.Snapshot - driver *compute.ComputeClient + MachineImageName string + MachineImageFile string + ImageListVersion int + driver *compute.ComputeClient } // BuilderId uniquely identifies the builder. 
@@ -24,16 +27,15 @@ func (a *Artifact) Files() []string { } func (a *Artifact) Id() string { - return a.Snapshot.Name + return a.MachineImageName } func (a *Artifact) String() string { - return fmt.Sprintf("A Snapshot was created: \n"+ + return fmt.Sprintf("An image list entry was created: \n"+ "Name: %s\n"+ - "Instance: %s\n"+ - "MachineImage: %s\n"+ - "URI: %s", - a.Snapshot.Name, a.Snapshot.Instance, a.Snapshot.MachineImage, a.Snapshot.URI) + "File: %s\n"+ + "Version: %d", + a.MachineImageName, a.MachineImageFile, a.ImageListVersion) } func (a *Artifact) State(name string) interface{} { diff --git a/builder/oracle/classic/builder.go b/builder/oracle/classic/builder.go index 088ff4dbc..1e90439be 100644 --- a/builder/oracle/classic/builder.go +++ b/builder/oracle/classic/builder.go @@ -99,8 +99,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the artifact and return it artifact := &Artifact{ - Snapshot: state.Get("snapshot").(*compute.Snapshot), - driver: client, + ImageListVersion: state.Get("image_list_version").(int), + MachineImageName: state.Get("machine_image_name").(string), + MachineImageFile: state.Get("machine_image_file").(string), + driver: client, } return artifact, nil diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index e0c82beb2..c04235da9 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -29,7 +29,7 @@ type Config struct { DestImageList string `mapstructure:"dest_image_list"` // Optional; if you don't enter anything, the image list description // will read "Packer-built image list" - DestImageListDescription string `mapstructure:"dest_image_list_description` + DestImageListDescription string `mapstructure:"dest_image_list_description"` // Optional. Describes what computers are allowed to reach your instance // via SSH. This whitelist must contain the computer you're running Packer // from. 
It defaults to public-internet, meaning that you can SSH into your @@ -72,6 +72,7 @@ func NewConfig(raws ...interface{}) (*Config, error) { "api_endpoint": c.APIEndpoint, "identity_domain": c.IdentityDomain, "source_image_list": c.SourceImageList, + "dest_image_list": c.DestImageList, "shape": c.Shape, } for k, v := range required { diff --git a/builder/oracle/classic/config_test.go b/builder/oracle/classic/config_test.go index 81b8f0344..214365731 100644 --- a/builder/oracle/classic/config_test.go +++ b/builder/oracle/classic/config_test.go @@ -6,14 +6,15 @@ import ( func testConfig() map[string]interface{} { return map[string]interface{}{ - "identity_domain": "abc12345", - "username": "test@hashicorp.com", - "password": "testpassword123", - "api_endpoint": "https://api-test.compute.test.oraclecloud.com/", - "image_list": "/oracle/public/myimage", - "shape": "oc3", - "image_name": "TestImageName", - "ssh_username": "opc", + "identity_domain": "abc12345", + "username": "test@hashicorp.com", + "password": "testpassword123", + "api_endpoint": "https://api-test.compute.test.oraclecloud.com/", + "dest_image_list": "/Config-thing/myuser/myimage", + "source_image_list": "/oracle/public/whatever", + "shape": "oc3", + "image_name": "TestImageName", + "ssh_username": "opc", } } @@ -36,7 +37,8 @@ func TestConfigValidationCatchesMissing(t *testing.T) { "password", "api_endpoint", "identity_domain", - "image_list", + "dest_image_list", + "source_image_list", "shape", } for _, key := range required { diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 891fda33e..041889ab1 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -3,7 +3,6 @@ package classic import ( "context" "fmt" - "log" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/helper/multistep" @@ -67,7 +66,6 @@ func (s *stepCreateInstance) Cleanup(state 
multistep.StateBag) { Name: config.ImageName, ID: imID, } - log.Printf("instance destroy input is %#v", input) err := instanceClient.DeleteInstance(input) if err != nil { diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index 8d0c5ee12..a32355e79 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -18,7 +18,6 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis client := state.Get("client").(*compute.ComputeClient) ui.Say("Adding image to image list...") - // TODO: Try to get image list imageListClient := client.ImageList() getInput := compute.GetImageListInput{ Name: config.DestImageList, @@ -27,7 +26,8 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis if err != nil { ui.Say(fmt.Sprintf(err.Error())) // If the list didn't exist, create it. - ui.Say(fmt.Sprintf("Creating image list: %s", config.DestImageList)) + ui.Say(fmt.Sprintf("Destination image list %s does not exist; Creating it...", + config.DestImageList)) ilInput := compute.CreateImageListInput{ Name: config.DestImageList, @@ -37,7 +37,7 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis // Load the packer-generated SSH key into the Oracle Compute cloud. imList, err = imageListClient.CreateImageList(&ilInput) if err != nil { - err = fmt.Errorf("Problem creating an image list through Oracle's API: %s", err) + err = fmt.Errorf("Problem creating image list: %s", err) ui.Error(err.Error()) state.Put("error", err) return multistep.ActionHalt @@ -47,11 +47,12 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis // Now create and image list entry for the image into that list. 
snap := state.Get("snapshot").(*compute.Snapshot) + version := len(imList.Entries) + 1 entriesClient := client.ImageListEntries() entriesInput := compute.CreateImageListEntryInput{ Name: config.DestImageList, MachineImages: []string{fmt.Sprintf("Compute-%s/%s/%s", config.IdentityDomain, config.Username, snap.MachineImage)}, - Version: 1, + Version: version, } entryInfo, err := entriesClient.CreateImageListEntry(&entriesInput) if err != nil { @@ -60,7 +61,28 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis state.Put("error", err) return multistep.ActionHalt } + state.Put("image_list_entry", entryInfo) ui.Message(fmt.Sprintf("created image list entry %s", entryInfo.Name)) + + imList, err = imageListClient.GetImageList(&getInput) + + machineImagesClient := client.MachineImages() + getImagesInput := compute.GetMachineImageInput{ + Name: config.ImageName, + } + + // Grab info about the machine image to return with the artifact + imInfo, err := machineImagesClient.GetMachineImage(&getImagesInput) + if err != nil { + err = fmt.Errorf("Problem getting machine image info: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + state.Put("machine_image_file", imInfo.File) + state.Put("machine_image_name", imInfo.Name) + state.Put("image_list_version", version) + return multistep.ActionContinue } diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index b98530691..452fb56eb 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -52,8 +52,8 @@ func (s *stepSnapshot) Cleanup(state multistep.StateBag) { Snapshot: snap.Name, MachineImage: snap.MachineImage, } - machineClient := client.MachineImages() - err := snapClient.DeleteSnapshot(machineClient, &snapInput) + + err := snapClient.DeleteSnapshotResourceOnly(&snapInput) if err != nil { err = fmt.Errorf("Problem deleting snapshot: %s", err) ui.Error(err.Error()) diff 
--git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go index d2a9616e9..370fd0c97 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go @@ -176,6 +176,31 @@ func (c *SnapshotsClient) DeleteSnapshot(machineImagesClient *MachineImagesClien return nil } +// DeleteSnapshot deletes the Snapshot with the given name. +// A machine image gets created with the associated snapshot is not deleted +// by this method. +func (c *SnapshotsClient) DeleteSnapshotResourceOnly(input *DeleteSnapshotInput) error { + // Wait for snapshot complete in case delay is active and the corresponding + // instance needs to be deleted first + getInput := &GetSnapshotInput{ + Name: input.Snapshot, + } + + if input.Timeout == 0 { + input.Timeout = WaitForSnapshotCompleteTimeout + } + + if _, err := c.WaitForSnapshotComplete(getInput, input.Timeout); err != nil { + return fmt.Errorf("Could not delete snapshot: %s", err) + } + + if err := c.deleteResource(input.Snapshot); err != nil { + return fmt.Errorf("Could not delete snapshot: %s", err) + } + + return nil +} + // WaitForSnapshotComplete waits for an snapshot to be completely initialized and available. 
func (c *SnapshotsClient) WaitForSnapshotComplete(input *GetSnapshotInput, timeout time.Duration) (*Snapshot, error) { var info *Snapshot diff --git a/vendor/vendor.json b/vendor/vendor.json index 6172b8c33..421c8710a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -803,7 +803,7 @@ "revisionTime": "2018-01-11T20:31:13Z" }, { - "checksumSHA1": "RhoE7zmHsn5zoXbx5AsAUUQI72E=", + "checksumSHA1": "wce86V0j11J6xRSvJEanprjK7so=", "path": "github.com/hashicorp/go-oracle-terraform/compute", "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", "revisionTime": "2018-01-11T20:31:13Z" diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index 08d3d10b1..83169fcee 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -40,14 +40,17 @@ This builder currently only works with the SSH communicator. requests. Instructions for determining your API endpoint can be found [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/SendRequests.html) + - `dest_image_list` (string) - Where to save the machine image to once you've + provisioned it. If the provided image list does not exist, Packer will create it. + - `identity_domain` (string) - This is your customer-specific identity domain as generated by Oracle. If you don't know what your identity domain is, ask your account administrator. For a little more information, see the Oracle [documentation](https://docs.oracle.com/en/cloud/get-started/subscriptions-cloud/ocuid/identity-domain-overview.html#GUID-7969F881-5F4D-443E-B86C-9044C8085B8A). - - `image_list` (string) - This is what image you want to use as your base image. + - `source_image_list` (string) - This is what image you want to use as your base image. See the [documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/listing-machine-images.html) - for more details. 
To see what public image lists are available, you can use + for more details. You may use either a public image list, or a private image list. To see what public image lists are available, you can use the CLI, as described [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stopc/image-lists-stclr-and-nmcli.html#GUID-DB7E75FE-F752-4FF7-AB70-3C8DCDFCA0FA) - `password` (string) - Your account password. @@ -60,6 +63,9 @@ This builder currently only works with the SSH communicator. ### Optional + - `dest_image_list_description` (string) - a description for your destination + image list. If you don't provide one, Packer will provide a generic description. + - `ssh_username` (string) - The username that Packer will use to SSH into the instance; defaults to `opc`, the default oracle user, which has sudo priveliges. If you have already configured users on your machine, you may From af26b312cdbcf5b3b0b0ed2fa0d3f34f5a5bd4eb Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 11:35:34 -0800 Subject: [PATCH 131/251] fix logline --- builder/oracle/classic/step_snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/oracle/classic/step_snapshot.go b/builder/oracle/classic/step_snapshot.go index 452fb56eb..b7cbbf008 100644 --- a/builder/oracle/classic/step_snapshot.go +++ b/builder/oracle/classic/step_snapshot.go @@ -44,7 +44,7 @@ func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multiste func (s *stepSnapshot) Cleanup(state multistep.StateBag) { // Delete the snapshot ui := state.Get("ui").(packer.Ui) - ui.Say("Creating Snapshot...") + ui.Say("Deleting Snapshot...") client := state.Get("client").(*compute.ComputeClient) snap := state.Get("snapshot").(*compute.Snapshot) snapClient := client.Snapshots() From 64e26d6fa253cb838ce69cbdd55cd6a290893206 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 12:23:25 -0800 Subject: [PATCH 132/251] update example file in the oracle-classic docs --- 
website/source/docs/builders/oracle-classic.html.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index 83169fcee..ac539b255 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -89,9 +89,10 @@ obfuscated; you will need to add a working `username`, `password`, "password": "supersecretpasswordhere", "identity_domain": "#######", "api_endpoint": "https://api-###.compute.###.oraclecloud.com/", - "image_list": "/oracle/public/OL_7.2_UEKR4_x86_64", + "source_image_list": "/oracle/public/OL_7.2_UEKR4_x86_64", "shape": "oc3", "image_name": "Packer_Builder_Test_{{timestamp}}" + "dest_image_list": "Packer_Builder_Test_List" } ], "provisioners": [ From 63f1673909e7aad4822a80ecdd243a8c6babb488 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 30 Jan 2018 23:09:12 -0800 Subject: [PATCH 133/251] ssh deadlines --- communicator/ssh/communicator.go | 7 +++++ communicator/ssh/connection.go | 30 +++++++++++++++++++ helper/communicator/config.go | 1 + helper/communicator/step_connect_ssh.go | 1 + .../docs/templates/communicator.html.md | 3 ++ 5 files changed, 42 insertions(+) create mode 100644 communicator/ssh/connection.go diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index eabf32aea..17d7cb6b8 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -59,6 +59,9 @@ type Config struct { // KeepAliveInterval sets how often we send a channel request to the // server. A value < 0 disables. KeepAliveInterval time.Duration + + // Timeout is how long to wait for a read or write to succeed. + Timeout time.Duration } // Creates a new packer.Communicator implementation over SSH. 
This takes @@ -291,6 +294,10 @@ func (c *comm) reconnect() (err error) { return } + if c.config.Timeout > 0 { + c.conn = &timeoutConn{c.conn, c.config.Timeout, c.config.Timeout} + } + log.Printf("handshaking with SSH") // Default timeout to 1 minute if it wasn't specified (zero value). For diff --git a/communicator/ssh/connection.go b/communicator/ssh/connection.go new file mode 100644 index 000000000..c3df04543 --- /dev/null +++ b/communicator/ssh/connection.go @@ -0,0 +1,30 @@ +package ssh + +import ( + "net" + "time" +) + +// timeoutConn wraps a net.Conn, and sets a deadline for every read +// and write operation. +type timeoutConn struct { + net.Conn + ReadTimeout time.Duration + WriteTimeout time.Duration +} + +func (c *timeoutConn) Read(b []byte) (int, error) { + err := c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout)) + if err != nil { + return 0, err + } + return c.Conn.Read(b) +} + +func (c *timeoutConn) Write(b []byte) (int, error) { + err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) + if err != nil { + return 0, err + } + return c.Conn.Write(b) +} diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 8fe1032cb..f2664b192 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -38,6 +38,7 @@ type Config struct { SSHProxyUsername string `mapstructure:"ssh_proxy_username"` SSHProxyPassword string `mapstructure:"ssh_proxy_password"` SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"` + SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"` // WinRM WinRMUser string `mapstructure:"winrm_username"` diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index 192dba739..18fd699c7 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -183,6 +183,7 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru DisableAgentForwarding: 
s.Config.SSHDisableAgentForwarding, UseSftp: s.Config.SSHFileTransferMethod == "sftp", KeepAliveInterval: s.Config.SSHKeepAliveInterval, + Timeout: s.Config.SSHReadWriteTimeout, } log.Println("[INFO] Attempting SSH connection...") diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 5fbb8a3d5..7b35370b4 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -106,6 +106,9 @@ The SSH communicator has the following options: - `ssh_pty` (boolean) - If true, a PTY will be requested for the SSH connection. This defaults to false. +* `ssh_read_write_timeout` (string) - The amount of time to wait for a remote + command to end. Example: "1h". Disabled by default. + - `ssh_timeout` (string) - The time to wait for SSH to become available. Packer uses this to determine when the machine has booted so this is usually quite long. Example value: "10m" From ff6425fb22a56c7d135b82a03ba8a2a195bfff63 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 31 Jan 2018 11:48:03 -0800 Subject: [PATCH 134/251] better documentation --- website/source/docs/templates/communicator.html.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 7b35370b4..0165ab5a1 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -107,7 +107,8 @@ The SSH communicator has the following options: connection. This defaults to false. * `ssh_read_write_timeout` (string) - The amount of time to wait for a remote - command to end. Example: "1h". Disabled by default. + command to end. This might be useful if, for example, packer hangs on + a connection after a reboot. Example: "5m". Disabled by default. - `ssh_timeout` (string) - The time to wait for SSH to become available. 
Packer uses this to determine when the machine has booted so this is From 73e8be022ddb836860ba777b8bbc13cf4a904e26 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 31 Jan 2018 12:45:38 -0800 Subject: [PATCH 135/251] call out hax accelerator issue --- website/source/docs/builders/qemu.html.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/builders/qemu.html.md b/website/source/docs/builders/qemu.html.md index 4885aca1c..2d3c82d9e 100644 --- a/website/source/docs/builders/qemu.html.md +++ b/website/source/docs/builders/qemu.html.md @@ -115,6 +115,10 @@ Linux server and have not enabled X11 forwarding (`ssh -X`). you specified. When no accelerator is specified, Packer will try to use `kvm` if it is available but will default to `tcg` otherwise. + -> The `hax` accelerator has issues attaching CDROM ISOs. This is an + upstream issue which can be tracked + [here](https://github.com/intel/haxm/issues/20). + - `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system installer. 
Special From 66cd85828e9e56925f95fb35266e17c089cc99f7 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 12:48:40 -0800 Subject: [PATCH 136/251] rename dest_image_list_description to image_description --- builder/oracle/classic/config.go | 2 +- website/source/docs/builders/oracle-classic.html.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index c04235da9..4cd7c42a7 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -29,7 +29,7 @@ type Config struct { DestImageList string `mapstructure:"dest_image_list"` // Optional; if you don't enter anything, the image list description // will read "Packer-built image list" - DestImageListDescription string `mapstructure:"dest_image_list_description"` + DestImageListDescription string `mapstructure:"image_description"` // Optional. Describes what computers are allowed to reach your instance // via SSH. This whitelist must contain the computer you're running Packer // from. It defaults to public-internet, meaning that you can SSH into your diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index ac539b255..e71cc9cc3 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -63,7 +63,7 @@ This builder currently only works with the SSH communicator. ### Optional - - `dest_image_list_description` (string) - a description for your destination + - `image_description` (string) - a description for your destination image list. If you don't provide one, Packer will provide a generic description. 
- `ssh_username` (string) - The username that Packer will use to SSH into the From 918a50225ba3a6e0001b8991117f4c282e3da42e Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 31 Jan 2018 12:52:04 -0800 Subject: [PATCH 137/251] cleanup docs --- .../source/docs/builders/vmware-iso.html.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/vmware-iso.html.md b/website/source/docs/builders/vmware-iso.html.md index fd56a475f..256bf63e4 100644 --- a/website/source/docs/builders/vmware-iso.html.md +++ b/website/source/docs/builders/vmware-iso.html.md @@ -107,10 +107,10 @@ builder. actual file representing the disk will not use the full size unless it is full. By default this is set to 40,000 (about 40 GB). -- `disk_type_id` (string) - The type of VMware virtual disk to create. The - default is "1", which corresponds to a growable virtual disk split in 2GB - files. For ESXi, this defaults to "zeroedthick". This option is for - advanced usage. For ESXi the available options are: `zeroedthick`, `eagerzeroedthick`, `thin`, `rdm:dev`, `rdmp:dev`, `2gbsparse`. For desktop VMware clients: +- `disk_type_id` (string) - The type of VMware virtual disk to create. This + option is for advanced usage. + + For desktop VMware clients: Type ID | Description --- | --- @@ -121,6 +121,16 @@ builder. `4` | Preallocated virtual disk compatible with ESX server (VMFS flat). `5` | Compressed disk optimized for streaming. + The default is "1". + + For ESXi, this defaults to "zeroedthick". The available options for ESXi + are: `zeroedthick`, `eagerzeroedthick`, `thin`, `rdm:dev`, `rdmp:dev`, + `2gbsparse`. + + For more information, please consult the [Virtual Disk Manager User's + Guide](https://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop + VMware clients. For ESXi, refer to the proper ESXi documentation. + * `disable_vnc` (boolean) - Whether to create a VNC connection or not. 
A `boot_command` cannot be used when this is `false`. Defaults to `false`. From 9f87213ba454391bfaf83e771f10a64cc741c200 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 31 Jan 2018 13:08:25 -0800 Subject: [PATCH 138/251] tests and docs for #5831 --- provisioner/chef-client/provisioner.go | 4 ++++ provisioner/chef-client/provisioner_test.go | 24 +++++++++++++++++++ .../docs/provisioners/chef-client.html.md | 18 +++++++++++--- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 27d94c6c3..b0af8cbea 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -196,6 +196,10 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } } + if (p.config.PolicyName != "") != (p.config.PolicyGroup != "") { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("If either policy_name or policy_group are set, they must both be set.")) + } + jsonValid := true for k, v := range p.config.Json { p.config.Json[k], err = p.deepJsonFix(k, v) diff --git a/provisioner/chef-client/provisioner_test.go b/provisioner/chef-client/provisioner_test.go index ac148721a..acb674b53 100644 --- a/provisioner/chef-client/provisioner_test.go +++ b/provisioner/chef-client/provisioner_test.go @@ -243,3 +243,27 @@ func TestProvisioner_removeDir(t *testing.T) { } } } + +func TestProvisionerPrepare_policy(t *testing.T) { + var p Provisioner + + var policyTests = []struct { + name string + group string + success bool + }{ + {"", "", true}, + {"a", "b", true}, + {"a", "", false}, + {"", "a", false}, + } + for _, tt := range policyTests { + config := testConfig() + config["policy_name"] = tt.name + config["policy_group"] = tt.group + err := p.Prepare(config) + if (err == nil) != tt.success { + t.Fatalf("wasn't expecting %+v to fail: %s", tt, err.Error()) + } + } +} diff --git a/website/source/docs/provisioners/chef-client.html.md 
b/website/source/docs/provisioners/chef-client.html.md index 566742490..c40f41034 100644 --- a/website/source/docs/provisioners/chef-client.html.md +++ b/website/source/docs/provisioners/chef-client.html.md @@ -80,6 +80,12 @@ configuration is actually required. - `node_name` (string) - The name of the node to register with the Chef Server. This is optional and by default is packer-{{uuid}}. +* `policy_group` (string) - The name of a policy group that exists on the + Chef server. `policy_name` must also be specified. + +* `policy_name` (string) - The name of a policy, as identified by the name + setting in a `Policyfile.rb` file. `policy_group` must also be specified. + - `prevent_sudo` (boolean) - By default, the configured commands that are executed to install and run Chef are executed with `sudo`. If this is true, then the sudo will be omitted. This has no effect when guest\_os\_type is @@ -105,9 +111,9 @@ configuration is actually required. SSL certificates. If not set, this defaults to "verify\_peer" which validates all SSL certifications. -- `trusted_certs_dir` (string) - This is a directory that contains additional - SSL certificates to trust. Any certificates in this directory will be added to - whatever CA bundle ruby is using. Use this to add self-signed certs for your +- `trusted_certs_dir` (string) - This is a directory that contains additional + SSL certificates to trust. Any certificates in this directory will be added to + whatever CA bundle ruby is using. Use this to add self-signed certs for your Chef Server or local HTTP file servers. 
- `staging_directory` (string) - This is the directory where all the @@ -160,6 +166,12 @@ node_name "{{.NodeName}}" {{if ne .ChefEnvironment ""}} environment "{{.ChefEnvironment}}" {{end}} +{{if ne .PolicyGroup ""}} +policy_group "{{.PolicyGroup}}" +{{end}} +{{if ne .PolicyName ""}} +policy_name "{{.PolicyName}}" +{{end}} {{if ne .SslVerifyMode ""}} ssl_verify_mode :{{.SslVerifyMode}} {{end}} From 8bdd3b45c7065db6b12862b3801409784d4a8805 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 31 Jan 2018 13:19:31 -0800 Subject: [PATCH 139/251] use helper functions for reading vmx files --- builder/vmware/common/ssh.go | 13 ++----------- builder/vmware/common/step_configure_vmx.go | 5 +---- builder/vmware/common/step_configure_vnc.go | 15 ++------------- 3 files changed, 5 insertions(+), 28 deletions(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 871ba8df9..4e339edb9 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -3,9 +3,7 @@ package common import ( "errors" "fmt" - "io/ioutil" "log" - "os" commonssh "github.com/hashicorp/packer/common/ssh" "github.com/hashicorp/packer/communicator/ssh" @@ -23,18 +21,11 @@ func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { } log.Println("Lookup up IP information...") - f, err := os.Open(vmxPath) + + vmxData, err := ReadVMX(vmxPath) if err != nil { return "", err } - defer f.Close() - - vmxBytes, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - vmxData := ParseVMX(string(vmxBytes)) var ok bool macAddress := "" diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go index 155234ac4..102ac52ee 100644 --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -3,7 +3,6 @@ package common import ( "context" "fmt" - "io/ioutil" "log" "regexp" "strings" @@ -26,7 +25,7 @@ func (s *StepConfigureVMX) Run(_ context.Context, state 
multistep.StateBag) mult ui := state.Get("ui").(packer.Ui) vmxPath := state.Get("vmx_path").(string) - vmxContents, err := ioutil.ReadFile(vmxPath) + vmxData, err := ReadVMX(vmxPath) if err != nil { err := fmt.Errorf("Error reading VMX file: %s", err) state.Put("error", err) @@ -34,8 +33,6 @@ func (s *StepConfigureVMX) Run(_ context.Context, state multistep.StateBag) mult return multistep.ActionHalt } - vmxData := ParseVMX(string(vmxContents)) - // Set this so that no dialogs ever appear from Packer. vmxData["msg.autoanswer"] = "true" diff --git a/builder/vmware/common/step_configure_vnc.go b/builder/vmware/common/step_configure_vnc.go index 90aa4a998..54f5ac182 100644 --- a/builder/vmware/common/step_configure_vnc.go +++ b/builder/vmware/common/step_configure_vnc.go @@ -3,11 +3,9 @@ package common import ( "context" "fmt" - "io/ioutil" "log" "math/rand" "net" - "os" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" @@ -87,17 +85,9 @@ func (s *StepConfigureVNC) Run(_ context.Context, state multistep.StateBag) mult ui := state.Get("ui").(packer.Ui) vmxPath := state.Get("vmx_path").(string) - f, err := os.Open(vmxPath) + vmxData, err := ReadVMX(vmxPath) if err != nil { - err := fmt.Errorf("Error reading VMX data: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - vmxBytes, err := ioutil.ReadAll(f) - if err != nil { - err := fmt.Errorf("Error reading VMX data: %s", err) + err := fmt.Errorf("Error reading VMX file: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt @@ -121,7 +111,6 @@ func (s *StepConfigureVNC) Run(_ context.Context, state multistep.StateBag) mult log.Printf("Found available VNC port: %d", vncPort) - vmxData := ParseVMX(string(vmxBytes)) vncFinder.UpdateVMX(vncBindAddress, vncPassword, vncPort, vmxData) if err := WriteVMX(vmxPath, vmxData); err != nil { From 3180dc327c09f180ad9d6119ff504f372e028888 Mon Sep 17 00:00:00 2001 From: Megan Marsh 
Date: Wed, 31 Jan 2018 15:02:19 -0800 Subject: [PATCH 140/251] remove copypasta comment --- builder/oracle/classic/step_list_images.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index a32355e79..64255b428 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -24,8 +24,8 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis } imList, err := imageListClient.GetImageList(&getInput) if err != nil { - ui.Say(fmt.Sprintf(err.Error())) // If the list didn't exist, create it. + ui.Say(fmt.Sprintf(err.Error())) ui.Say(fmt.Sprintf("Destination image list %s does not exist; Creating it...", config.DestImageList)) @@ -34,7 +34,6 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis Description: "Packer-built image list", } - // Load the packer-generated SSH key into the Oracle Compute cloud. 
imList, err = imageListClient.CreateImageList(&ilInput) if err != nil { err = fmt.Errorf("Problem creating image list: %s", err) From 8f7937f492de127ed72237de6b7a8df8d49c0301 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 15:22:09 -0800 Subject: [PATCH 141/251] fix machine image name to include prepended / --- builder/oracle/classic/step_list_images.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index 64255b428..4125733b0 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -49,9 +49,10 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis version := len(imList.Entries) + 1 entriesClient := client.ImageListEntries() entriesInput := compute.CreateImageListEntryInput{ - Name: config.DestImageList, - MachineImages: []string{fmt.Sprintf("Compute-%s/%s/%s", config.IdentityDomain, config.Username, snap.MachineImage)}, - Version: version, + Name: config.DestImageList, + MachineImages: []string{fmt.Sprintf("/Compute-%s/%s/%s", + config.IdentityDomain, config.Username, snap.MachineImage)}, + Version: version, } entryInfo, err := entriesClient.CreateImageListEntry(&entriesInput) if err != nil { From 383ac13e2a5459d9d2d4f6d7d0101057f4c85669 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 31 Jan 2018 16:37:55 -0800 Subject: [PATCH 142/251] update default of image list after adding new entry. 
--- builder/oracle/classic/step_list_images.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index 4125733b0..a99612c14 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -64,13 +64,25 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis state.Put("image_list_entry", entryInfo) ui.Message(fmt.Sprintf("created image list entry %s", entryInfo.Name)) - imList, err = imageListClient.GetImageList(&getInput) - machineImagesClient := client.MachineImages() getImagesInput := compute.GetMachineImageInput{ Name: config.ImageName, } + // Update image list default to use latest version + updateInput := compute.UpdateImageListInput{ + Default: version, + Description: config.DestImageListDescription, + Name: config.DestImageList, + } + _, err = imageListClient.UpdateImageList(&updateInput) + if err != nil { + err = fmt.Errorf("Problem updating default image list version: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + // Grab info about the machine image to return with the artifact imInfo, err := machineImagesClient.GetMachineImage(&getImagesInput) if err != nil { From 59ecaee2fe7db7db73baa4d5d345bb85498505c9 Mon Sep 17 00:00:00 2001 From: SharePointOscar Date: Wed, 31 Jan 2018 18:00:13 -0800 Subject: [PATCH 143/251] implemented changes as per feedback for Azure CLI 2.0 --- contrib/azure-setup.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/contrib/azure-setup.sh b/contrib/azure-setup.sh index 7e6817e74..75e770055 100755 --- a/contrib/azure-setup.sh +++ b/contrib/azure-setup.sh @@ -25,9 +25,9 @@ showhelp() { echo "" echo " For simplicity we make a lot of assumptions and choose reasonable" echo " defaults. 
If you want more control over what happens, please use" - echo " the azure cli directly." + echo " the azure-cli directly." echo "" - echo " Note that you must already have an az account, username," + echo " Note that you must already have an Azure account, username," echo " password, and subscription. You can create those here:" echo "" echo " - https://account.windowsazure.com/" @@ -54,8 +54,9 @@ requirements() { found=$((found + 1)) echo "Found azure-cli version: $azureversion" else - echo "azure cli is missing. Please install azure cli from" - echo "https://az.microsoft.com/en-us/documentation/articles/xplat-cli-install/" + echo "azure-cli is missing. Please install azure-cli from" + echo "https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest" + echo "Alternatively, you can use the Cloud Shell https://docs.microsoft.com/en-us/azure/cloud-shell/overview right from the Azure Portal or even VS Code." fi jqversion=$(jq --version) @@ -169,7 +170,7 @@ createApplication() { createServicePrincipal() { echo "==> Creating service principal" - # az CLI 0.10.2 introduced a breaking change, where appId must be supplied with the -a switch + # Azure CLI 0.10.2 introduced a breaking change, where appId must be supplied with the -a switch # prior version accepted appId as the only parameter without a switch newer_syntax=false IFS='.' 
read -ra azureversionsemver <<< "$azureversion" From f3c326bb3c47653d7f7ffa76bc02e94eec8c4b4b Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 21 Sep 2017 20:18:47 +0100 Subject: [PATCH 144/251] Escape chars special to PowerShell in user supplied data --- provisioner/powershell/provisioner.go | 85 ++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 3 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 3d2f16f85..cc11ebc0c 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -8,12 +8,14 @@ import ( "encoding/xml" "errors" "fmt" + "io" "io/ioutil" "log" "os" "sort" "strings" "time" + "unicode/utf8" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/uuid" @@ -345,6 +347,8 @@ func (p *Provisioner) prepareEnvVars(elevated bool) (envVarPath string, err erro } func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { + var buf bytes.Buffer + escapedEnvVarValue := "" flattened = "" envVars := make(map[string]string) @@ -359,7 +363,16 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { // Split vars into key/value components for _, envVar := range p.config.Vars { keyValue := strings.SplitN(envVar, "=", 2) - envVars[keyValue[0]] = keyValue[1] + // Escape chars special to PS in each env var value + err := escapeSpecialPS(&buf, []byte(keyValue[1])) + if err != nil { + fmt.Printf("An error occured escaping chars special to PowerShell in env var value %s", keyValue[1]) + } + escapedEnvVarValue = buf.String() + buf.Reset() + + log.Printf("Env var %s converted to %s after escaping chars special to PS", keyValue[1], escapedEnvVarValue) + envVars[keyValue[0]] = escapedEnvVarValue } // Create a list of env var keys in sorted order @@ -483,10 +496,27 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin buffer.Reset() + // Escape characters special to PowerShell in the 
ElevatedUser string + err = escapeSpecialPS(&buffer, []byte(p.config.ElevatedUser)) + if err != nil { + fmt.Printf("Error escaping chars special to Powershell in ElevatedUser %s", p.config.ElevatedUser) + } + escapedElevatedUser := buffer.String() + log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", p.config.ElevatedUser, escapedElevatedUser) + buffer.Reset() + // Escape characters special to PowerShell in the ElevatedPassword string + err = escapeSpecialPS(&buffer, []byte(p.config.ElevatedPassword)) + if err != nil { + fmt.Printf("Error escaping chars special to Powershell in ElevatedPassword %s", p.config.ElevatedPassword) + } + escapedElevatedPassword := buffer.String() + log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", p.config.ElevatedPassword, escapedElevatedPassword) + buffer.Reset() + // Generate command err = elevatedTemplate.Execute(&buffer, elevatedOptions{ - User: p.config.ElevatedUser, - Password: p.config.ElevatedPassword, + User: escapedElevatedUser, + Password: escapedElevatedPassword, TaskName: taskName, TaskDescription: "Packer elevated task", LogFile: logFile, @@ -509,3 +539,52 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin path = fmt.Sprintf("%s-%s.ps1", "%TEMP%\\packer-elevated-shell", uuid) return path, err } + +func escapeSpecialPS(w io.Writer, s []byte) error { + var esc []byte + + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRune(s[i:]) + i += width + switch r { + case '"': + esc = []byte("`\"") // Double quotes + case '\'': + esc = []byte("`'") // Single quotes + case '$': + esc = []byte("`$") // Dollar sign + case '`': + esc = []byte("``") // Backticks + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = []byte("\uFFFD") // Unicode replacement character + break + } + continue + } + if _, err := w.Write(s[last : i-width]); err != nil { + return err + } + if _, err := 
w.Write(esc); err != nil { + return err + } + last = i + } + if _, err := w.Write(s[last:]); err != nil { + return err + } + return nil +} + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of http://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. +func isInCharacterRange(r rune) (inrange bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xDF77 || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} From b67c64fd66874d64dc24bd7bda26c895e877135b Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 21 Sep 2017 23:26:14 +0100 Subject: [PATCH 145/251] Tests for escape of chars special to PowerShell in user supplied data --- provisioner/powershell/provisioner_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index 749564d4d..2791601fa 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -518,6 +518,12 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { {"FOO=bar", "BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals + // Test escaping of characters special to PowerShell + {"FOO=bar$baz"}, // User env var with value containing dollar + {"FOO=bar\"baz"}, // User env var with value containing a double quote + {"FOO=bar'baz"}, // User env var with value containing a single quote + {"FOO=bar`baz"}, // User env var with value containing a backtick + } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -525,6 +531,10 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; 
$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) @@ -553,6 +563,11 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO=bar", "BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals + // Test escaping of characters special to PowerShell + {"FOO=bar$baz"}, // User env var with value containing dollar + {"FOO=bar\"baz"}, // User env var with value containing a double quote + {"FOO=bar'baz"}, // User env var with value containing a single quote + {"FOO=bar`baz"}, // User env var with value containing a backtick } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -560,6 +575,10 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; 
$env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) From 68e13c90b1c7ef50a7c262a4e1341ce2dafce5bb Mon Sep 17 00:00:00 2001 From: DanHam Date: Sun, 15 Oct 2017 14:30:17 +0100 Subject: [PATCH 146/251] Revert "Escape chars special to PowerShell in user supplied data" This reverts commit 53aefe744bcd74ee6ac866f764eafe9e7f507d80. --- provisioner/powershell/provisioner.go | 85 +-------------------------- 1 file changed, 3 insertions(+), 82 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index cc11ebc0c..3d2f16f85 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -8,14 +8,12 @@ import ( "encoding/xml" "errors" "fmt" - "io" "io/ioutil" "log" "os" "sort" "strings" "time" - "unicode/utf8" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/uuid" @@ -347,8 +345,6 @@ func (p *Provisioner) prepareEnvVars(elevated bool) (envVarPath string, err erro } func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { - var buf bytes.Buffer - escapedEnvVarValue := "" flattened = "" envVars := make(map[string]string) @@ -363,16 +359,7 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { // Split vars into key/value components for _, envVar := range p.config.Vars { keyValue := strings.SplitN(envVar, "=", 2) - // Escape chars special to PS in each env var value - err := escapeSpecialPS(&buf, []byte(keyValue[1])) - if err != nil { - fmt.Printf("An error occured escaping chars special to PowerShell in env var value %s", keyValue[1]) - } - escapedEnvVarValue = buf.String() - buf.Reset() - - log.Printf("Env var %s converted to %s after escaping chars special to PS", keyValue[1], escapedEnvVarValue) - envVars[keyValue[0]] = escapedEnvVarValue + envVars[keyValue[0]] = keyValue[1] } // Create a list of env var keys in sorted order @@ -496,27 +483,10 @@ func (p *Provisioner) generateElevatedRunner(command string) 
(uploadedPath strin buffer.Reset() - // Escape characters special to PowerShell in the ElevatedUser string - err = escapeSpecialPS(&buffer, []byte(p.config.ElevatedUser)) - if err != nil { - fmt.Printf("Error escaping chars special to Powershell in ElevatedUser %s", p.config.ElevatedUser) - } - escapedElevatedUser := buffer.String() - log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", p.config.ElevatedUser, escapedElevatedUser) - buffer.Reset() - // Escape characters special to PowerShell in the ElevatedPassword string - err = escapeSpecialPS(&buffer, []byte(p.config.ElevatedPassword)) - if err != nil { - fmt.Printf("Error escaping chars special to Powershell in ElevatedPassword %s", p.config.ElevatedPassword) - } - escapedElevatedPassword := buffer.String() - log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", p.config.ElevatedPassword, escapedElevatedPassword) - buffer.Reset() - // Generate command err = elevatedTemplate.Execute(&buffer, elevatedOptions{ - User: escapedElevatedUser, - Password: escapedElevatedPassword, + User: p.config.ElevatedUser, + Password: p.config.ElevatedPassword, TaskName: taskName, TaskDescription: "Packer elevated task", LogFile: logFile, @@ -539,52 +509,3 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin path = fmt.Sprintf("%s-%s.ps1", "%TEMP%\\packer-elevated-shell", uuid) return path, err } - -func escapeSpecialPS(w io.Writer, s []byte) error { - var esc []byte - - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRune(s[i:]) - i += width - switch r { - case '"': - esc = []byte("`\"") // Double quotes - case '\'': - esc = []byte("`'") // Single quotes - case '$': - esc = []byte("`$") // Dollar sign - case '`': - esc = []byte("``") // Backticks - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = []byte("\uFFFD") // Unicode replacement character - break - } - continue - } - if _, 
err := w.Write(s[last : i-width]); err != nil { - return err - } - if _, err := w.Write(esc); err != nil { - return err - } - last = i - } - if _, err := w.Write(s[last:]); err != nil { - return err - } - return nil -} - -// Decide whether the given rune is in the XML Character Range, per -// the Char production of http://www.xml.com/axml/testaxml.htm, -// Section 2.2 Characters. -func isInCharacterRange(r rune) (inrange bool) { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xDF77 || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} From 7443adf2fd3560b309e3628420dc231bc078c204 Mon Sep 17 00:00:00 2001 From: DanHam Date: Sun, 15 Oct 2017 13:28:59 +0100 Subject: [PATCH 147/251] Simpler escape of chars special to PowerShell in user supplied data --- provisioner/powershell/provisioner.go | 34 +++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 3d2f16f85..41b38ed6d 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -24,6 +24,13 @@ import ( var retryableSleep = 2 * time.Second +var psEscape = strings.NewReplacer( + "$", "`$", + "\"", "`\"", + "`", "``", + "'", "`'", +) + type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -359,7 +366,13 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { // Split vars into key/value components for _, envVar := range p.config.Vars { keyValue := strings.SplitN(envVar, "=", 2) - envVars[keyValue[0]] = keyValue[1] + // Escape chars special to PS in each env var value + escapedEnvVarValue := psEscape.Replace(keyValue[1]) + if escapedEnvVarValue != keyValue[1] { + log.Printf("Env var %s converted to %s after escaping chars special to PS", keyValue[1], + escapedEnvVarValue) + } + envVars[keyValue[0]] = escapedEnvVarValue } // Create a list of env var keys in sorted order @@ -480,13 
+493,26 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin } escapedCommand := buffer.String() log.Printf("Command [%s] converted to [%s] for use in XML string", command, escapedCommand) - buffer.Reset() + // Escape chars special to PowerShell in the ElevatedUser string + escapedElevatedUser := psEscape.Replace(p.config.ElevatedUser) + if escapedElevatedUser != p.config.ElevatedUser { + log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", + p.config.ElevatedUser, escapedElevatedUser) + } + + // Escape chars special to PowerShell in the ElevatedPassword string + escapedElevatedPassword := psEscape.Replace(p.config.ElevatedPassword) + if escapedElevatedPassword != p.config.ElevatedPassword { + log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", + p.config.ElevatedPassword, escapedElevatedPassword) + } + // Generate command err = elevatedTemplate.Execute(&buffer, elevatedOptions{ - User: p.config.ElevatedUser, - Password: p.config.ElevatedPassword, + User: escapedElevatedUser, + Password: escapedElevatedPassword, TaskName: taskName, TaskDescription: "Packer elevated task", LogFile: logFile, From 6ad4917960c251e16ac0f6d6f4e0a9dcf27e2f01 Mon Sep 17 00:00:00 2001 From: DanHam Date: Mon, 30 Oct 2017 00:14:06 +0000 Subject: [PATCH 148/251] First attempt at fixer for powershell escapes --- fix/fixer.go | 2 + fix/fixer_powershell_escapes.go | 73 +++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 fix/fixer_powershell_escapes.go diff --git a/fix/fixer.go b/fix/fixer.go index 5ca0b3a18..d5622b299 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -34,6 +34,7 @@ func init() { "amazon-shutdown_behavior": new(FixerAmazonShutdownBehavior), "amazon-enhanced-networking": new(FixerAmazonEnhancedNetworking), "docker-email": new(FixerDockerEmail), + "powershell-escapes": new(FixerPowerShellEscapes), } FixerOrder = []string{ @@ -51,5 +52,6 @@ 
func init() { "amazon-shutdown_behavior", "amazon-enhanced-networking", "docker-email", + "powershell-escapes", } } diff --git a/fix/fixer_powershell_escapes.go b/fix/fixer_powershell_escapes.go new file mode 100644 index 000000000..9da9ae91f --- /dev/null +++ b/fix/fixer_powershell_escapes.go @@ -0,0 +1,73 @@ +package fix + +import ( + "github.com/mitchellh/mapstructure" + "strings" +) + +// FixerPowerShellEscapes removes the PowerShell escape character from user +// environment variables and elevated username and password strings +type FixerPowerShellEscapes struct{} + +func (FixerPowerShellEscapes) Fix(input map[string]interface{}) (map[string]interface{}, error) { + type template struct { + Provisioners []interface{} + } + + var psUnescape = strings.NewReplacer( + "`$", "$", + "`\"", "\"", + "``", "`", + "`'", "'", + ) + + // Decode the input into our structure, if we can + var tpl template + if err := mapstructure.WeakDecode(input, &tpl); err != nil { + return nil, err + } + + for i, raw := range tpl.Provisioners { + var provisioners map[string]interface{} + if err := mapstructure.Decode(raw, &provisioners); err != nil { + // Ignore errors, could be a non-map + continue + } + + if ok := provisioners["type"] == "powershell"; !ok { + continue + } + + if _, ok := provisioners["elevated_user"]; ok { + provisioners["elevated_user"] = psUnescape.Replace(provisioners["elevated_user"].(string)) + } + if _, ok := provisioners["elevated_password"]; ok { + provisioners["elevated_password"] = psUnescape.Replace(provisioners["elevated_password"].(string)) + } + if raw, ok := provisioners["environment_vars"]; ok { + var env_vars []string + if err := mapstructure.Decode(raw, &env_vars); err != nil { + continue + } + env_vars_unescaped := make([]interface{}, len(env_vars)) + for j, env_var := range env_vars { + env_vars_unescaped[j] = psUnescape.Replace(env_var) + } + // Replace with unescaped environment variables + provisioners["environment_vars"] = env_vars_unescaped + } + 
+ // Write all changes back to template + tpl.Provisioners[i] = provisioners + } + + if len(tpl.Provisioners) > 0 { + input["provisioners"] = tpl.Provisioners + } + + return input, nil +} + +func (FixerPowerShellEscapes) Synopsis() string { + return `Removes PowerShell escapes from user env vars and elevated username and password strings` +} From d25a26e78a6f1f729394ede86e476dc9d01f3aed Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 1 Feb 2018 14:52:22 -0800 Subject: [PATCH 149/251] update changelog in prep for 1.2.0 --- CHANGELOG.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3920501e..1d73eb0ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## (UNRELEASED) +### BACKWARDS INCOMPATIBILITIES: +* core: Affects Windows guests: User variables containing Powershell special characters no longer need to be escaped.[GH-5376] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] +* 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will break; the work to fix them is minimal and documented in GH-5810. [GH-5810] + ### IMPROVEMENTS: * builder/docker: Remove credentials from being shown in the log. [GH-5666] @@ -9,10 +14,31 @@ * builder/amazon: Warn during prepare if we didn't get both an access key and a secret key when we were expecting one. [GH-5762] * builder/amazon: Replace `InstanceStatusOK` check with `InstanceReady`. This reduces build times universally while still working for all instance types. [GH-5678] * builder/amazon: Add `kms_key_id` option to block device mappings. 
[GH-5774] +* builder/hyper-v: New option to use differential disks and Inline disk creation to improve build time and reduce disk usage [GH-5631] +* post-processor/vagrant: Add vagrant post-processor support for Google [GH-5732] +* provisioner/chef: Added Policyfile support to chef-client provisioner. [GH-5831] +* builder/qemu: Add Intel HAXM support to QEMU builder [GH-5738] +* communicator/ssh: Add session-level keep-alives [GH-5830] +* post-processor/amazon-import: Allow user to specify role name in amazon-import [GH-5817] +* provisioner/chef: Add support for 'trusted_certs_dir' chef-client configuration option [GH-5790] +* builder/triton: Updated triton-go dependencies, allowing better error handling. [GH-5795] +* core: Improved error logging in floppy file handling. [GH-5802] +* provisioner/amazon: Use Amazon SDK's InstanceRunning waiter instead of InstanceStatusOK waiter [GH-5773] +* builder/amazon: Add Paris region (eu-west-3) [GH-5718] +* builder/azure: Add validation for incorrect VHD URLs [GH-5695] +* builder/amazon: Remove Session Token (STS) from being shown in the log. [GH-5665] ### BUG FIXES: * builder/alicloud-ecs: Attach keypair before starting instance in alicloud builder [GH-5739] +* builder/vmware: Fixed file handle leak that may have caused race conditions in vmware builder [GH-5767] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] +* provisioner/ansible: The "default extra variables" feature added in Packer v1.0.1 caused the ansible-local provisioner to fail when an --extra-vars argument was specified in the extra_arguments configuration option; this has been fixed. 
[GH-5335] +* communicator/ssh: Add deadline to SSH connection to prevent Packer hangs after script provisioner reboots vm [GH-4684] +* builder/virtualbox: Fix regression affecting users running Packer on a Windows host that kept Packer from finding Virtualbox guest additions if Packer ran on a different drive from the one where the guest additions were stored. [GH-5761] +* builder/virtualbox: Fix interpolation ordering so that edge cases around guest_additions_url are handled correctly [GH-5757] +* builder/amazon: NewSession now inherits MaxRetries and other settings. [GH-5719] + ## 1.1.3 (December 8, 2017) From 30a4998a810fb4d2be485dde0b782e2e2129c2a6 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 1 Feb 2018 16:35:10 -0800 Subject: [PATCH 150/251] branding --- website/source/docs/builders/oracle-classic.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index e71cc9cc3..0d9dc05b7 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -3,16 +3,16 @@ description: The oracle-classic builder is able to create new custom images for use with Oracle Compute Cloud. layout: docs -page_title: 'Oracle Classic - Builders' +page_title: 'Oracle Cloud Infrastructure Classic - Builders' sidebar_current: 'docs-builders-oracle-classic' --- -# Oracle Compute Cloud (Classic) Builder +# Oracle Cloud Infrastructure Classic Compute Builder Type: `oracle-classic` The `oracle-classic` Packer builder is able to create custom images for use -with [Oracle Compute Cloud](https://cloud.oracle.com/compute-classic). The builder +with [Oracle Cloud Infrastructure Classic Compute](https://cloud.oracle.com/compute-classic). The builder takes a base image, runs any provisioning necessary on the base image after launching it, and finally snapshots it creating a reusable custom image. 
@@ -25,7 +25,7 @@ to use it or delete it. ## Authorization -This builder authenticates API calls to Compute Classic using basic +This builder authenticates API calls to Oracle Cloud Infrastructure Classic Compute using basic authentication (user name and password). To read more, see the [authentication documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/Authentication.html) From 79fe9003784d0bf69eaa910366c77ad94ad978fa Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 2 Feb 2018 09:57:36 -0800 Subject: [PATCH 151/251] Revert "Merge pull request #5376 from DanHam/ps-escapes" Revert so that we can merge a different branch that's had more recent work instead This reverts commit ba518637d49380955f4bd2f0e6009888037fa82e, reversing changes made to e56849c6056220b8dd555f841f7e0a5028b25af0. --- fix/fixer.go | 2 - fix/fixer_powershell_escapes.go | 73 ---------------------- provisioner/powershell/provisioner.go | 34 ++-------- provisioner/powershell/provisioner_test.go | 19 ------ 4 files changed, 4 insertions(+), 124 deletions(-) delete mode 100644 fix/fixer_powershell_escapes.go diff --git a/fix/fixer.go b/fix/fixer.go index d5622b299..5ca0b3a18 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -34,7 +34,6 @@ func init() { "amazon-shutdown_behavior": new(FixerAmazonShutdownBehavior), "amazon-enhanced-networking": new(FixerAmazonEnhancedNetworking), "docker-email": new(FixerDockerEmail), - "powershell-escapes": new(FixerPowerShellEscapes), } FixerOrder = []string{ @@ -52,6 +51,5 @@ func init() { "amazon-shutdown_behavior", "amazon-enhanced-networking", "docker-email", - "powershell-escapes", } } diff --git a/fix/fixer_powershell_escapes.go b/fix/fixer_powershell_escapes.go deleted file mode 100644 index 9da9ae91f..000000000 --- a/fix/fixer_powershell_escapes.go +++ /dev/null @@ -1,73 +0,0 @@ -package fix - -import ( - "github.com/mitchellh/mapstructure" - "strings" -) - -// FixerPowerShellEscapes removes the PowerShell escape character from user 
-// environment variables and elevated username and password strings -type FixerPowerShellEscapes struct{} - -func (FixerPowerShellEscapes) Fix(input map[string]interface{}) (map[string]interface{}, error) { - type template struct { - Provisioners []interface{} - } - - var psUnescape = strings.NewReplacer( - "`$", "$", - "`\"", "\"", - "``", "`", - "`'", "'", - ) - - // Decode the input into our structure, if we can - var tpl template - if err := mapstructure.WeakDecode(input, &tpl); err != nil { - return nil, err - } - - for i, raw := range tpl.Provisioners { - var provisioners map[string]interface{} - if err := mapstructure.Decode(raw, &provisioners); err != nil { - // Ignore errors, could be a non-map - continue - } - - if ok := provisioners["type"] == "powershell"; !ok { - continue - } - - if _, ok := provisioners["elevated_user"]; ok { - provisioners["elevated_user"] = psUnescape.Replace(provisioners["elevated_user"].(string)) - } - if _, ok := provisioners["elevated_password"]; ok { - provisioners["elevated_password"] = psUnescape.Replace(provisioners["elevated_password"].(string)) - } - if raw, ok := provisioners["environment_vars"]; ok { - var env_vars []string - if err := mapstructure.Decode(raw, &env_vars); err != nil { - continue - } - env_vars_unescaped := make([]interface{}, len(env_vars)) - for j, env_var := range env_vars { - env_vars_unescaped[j] = psUnescape.Replace(env_var) - } - // Replace with unescaped environment variables - provisioners["environment_vars"] = env_vars_unescaped - } - - // Write all changes back to template - tpl.Provisioners[i] = provisioners - } - - if len(tpl.Provisioners) > 0 { - input["provisioners"] = tpl.Provisioners - } - - return input, nil -} - -func (FixerPowerShellEscapes) Synopsis() string { - return `Removes PowerShell escapes from user env vars and elevated username and password strings` -} diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 41b38ed6d..3d2f16f85 100644 
--- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -24,13 +24,6 @@ import ( var retryableSleep = 2 * time.Second -var psEscape = strings.NewReplacer( - "$", "`$", - "\"", "`\"", - "`", "``", - "'", "`'", -) - type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -366,13 +359,7 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { // Split vars into key/value components for _, envVar := range p.config.Vars { keyValue := strings.SplitN(envVar, "=", 2) - // Escape chars special to PS in each env var value - escapedEnvVarValue := psEscape.Replace(keyValue[1]) - if escapedEnvVarValue != keyValue[1] { - log.Printf("Env var %s converted to %s after escaping chars special to PS", keyValue[1], - escapedEnvVarValue) - } - envVars[keyValue[0]] = escapedEnvVarValue + envVars[keyValue[0]] = keyValue[1] } // Create a list of env var keys in sorted order @@ -493,26 +480,13 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin } escapedCommand := buffer.String() log.Printf("Command [%s] converted to [%s] for use in XML string", command, escapedCommand) + buffer.Reset() - // Escape chars special to PowerShell in the ElevatedUser string - escapedElevatedUser := psEscape.Replace(p.config.ElevatedUser) - if escapedElevatedUser != p.config.ElevatedUser { - log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", - p.config.ElevatedUser, escapedElevatedUser) - } - - // Escape chars special to PowerShell in the ElevatedPassword string - escapedElevatedPassword := psEscape.Replace(p.config.ElevatedPassword) - if escapedElevatedPassword != p.config.ElevatedPassword { - log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", - p.config.ElevatedPassword, escapedElevatedPassword) - } - // Generate command err = elevatedTemplate.Execute(&buffer, elevatedOptions{ - User: escapedElevatedUser, - Password: 
escapedElevatedPassword, + User: p.config.ElevatedUser, + Password: p.config.ElevatedPassword, TaskName: taskName, TaskDescription: "Packer elevated task", LogFile: logFile, diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index 2791601fa..749564d4d 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -518,12 +518,6 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { {"FOO=bar", "BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals - // Test escaping of characters special to PowerShell - {"FOO=bar$baz"}, // User env var with value containing dollar - {"FOO=bar\"baz"}, // User env var with value containing a double quote - {"FOO=bar'baz"}, // User env var with value containing a single quote - {"FOO=bar`baz"}, // User env var with value containing a backtick - } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -531,10 +525,6 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) @@ -563,11 +553,6 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO=bar", 
"BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals - // Test escaping of characters special to PowerShell - {"FOO=bar$baz"}, // User env var with value containing dollar - {"FOO=bar\"baz"}, // User env var with value containing a double quote - {"FOO=bar'baz"}, // User env var with value containing a single quote - {"FOO=bar`baz"}, // User env var with value containing a backtick } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -575,10 +560,6 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", - "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) From addedbb680eb4d2ac4df064f712931f6b55bea4e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 2 Feb 2018 09:58:39 -0800 Subject: [PATCH 152/251] Revert "Merge pull request #5515 from DanHam/dot-source-env-vars" revert so we can use a branch that's had more recent work done This reverts commit e56849c6056220b8dd555f841f7e0a5028b25af0, reversing changes made to 6d14eb6ea4ffa5cbaa0bad9d62d2dff09f946b15. 
--- provisioner/powershell/provisioner.go | 52 ++++-------- provisioner/powershell/provisioner_test.go | 85 ++++++------------- .../docs/provisioners/powershell.html.md | 50 +++++------ 3 files changed, 65 insertions(+), 122 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 3d2f16f85..4297acf08 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -113,7 +113,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.EnvVarFormat == "" { - p.config.EnvVarFormat = `$env:%s="%s"; ` + p.config.EnvVarFormat = `$env:%s=\"%s\"; ` } if p.config.ElevatedEnvVarFormat == "" { @@ -121,7 +121,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` + p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` } if p.config.ElevatedExecuteCommand == "" { @@ -331,19 +331,6 @@ func (p *Provisioner) retryable(f func() error) error { } } -// Enviroment variables required within the remote environment are uploaded within a PS script and -// then enabled by 'dot sourcing' the script immediately prior to execution of the main command -func (p *Provisioner) prepareEnvVars(elevated bool) (envVarPath string, err error) { - // Collate all required env vars into a plain string with required formatting applied - flattenedEnvVars := p.createFlattenedEnvVars(elevated) - // Create a powershell script on the target build fs containing the flattened env vars - envVarPath, err = p.uploadEnvVars(flattenedEnvVars) - if err != nil { - return "", err - } - return -} - func (p *Provisioner) 
createFlattenedEnvVars(elevated bool) (flattened string) { flattened = "" envVars := make(map[string]string) @@ -380,19 +367,6 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { return } -func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (envVarPath string, err error) { - // Upload all env vars to a powershell script on the target build file system - envVarReader := strings.NewReader(flattenedEnvVars) - uuid := uuid.TimeOrderedUUID() - envVarPath = fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) - log.Printf("Uploading env vars to %s", envVarPath) - err = p.communicator.Upload(envVarPath, envVarReader, nil) - if err != nil { - return "", fmt.Errorf("Error uploading ps script containing env vars: %s", err) - } - return -} - func (p *Provisioner) createCommandText() (command string, err error) { // Return the interpolated command if p.config.ElevatedUser == "" { @@ -403,15 +377,12 @@ func (p *Provisioner) createCommandText() (command string, err error) { } func (p *Provisioner) createCommandTextNonPrivileged() (command string, err error) { - // Prepare everything needed to enable the required env vars within the remote environment - envVarPath, err := p.prepareEnvVars(false) - if err != nil { - return "", err - } + // Create environment variables to set before executing the command + flattenedEnvVars := p.createFlattenedEnvVars(false) p.config.ctx.Data = &ExecuteCommandTemplate{ + Vars: flattenedEnvVars, Path: p.config.RemotePath, - Vars: envVarPath, } command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) @@ -424,10 +395,17 @@ func (p *Provisioner) createCommandTextNonPrivileged() (command string, err erro } func (p *Provisioner) createCommandTextPrivileged() (command string, err error) { - // Prepare everything needed to enable the required env vars within the remote environment - envVarPath, err := p.prepareEnvVars(false) + // Can't double escape the env vars, lets create shiny new 
ones + flattenedEnvVars := p.createFlattenedEnvVars(true) + // Need to create a mini ps1 script containing all of the environment variables we want; + // we'll be dot-sourcing this later + envVarReader := strings.NewReader(flattenedEnvVars) + uuid := uuid.TimeOrderedUUID() + envVarPath := fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) + log.Printf("Uploading env vars to %s", envVarPath) + err = p.communicator.Upload(envVarPath, envVarReader, nil) if err != nil { - return "", err + return "", fmt.Errorf("Error preparing elevated powershell script: %s", err) } p.config.ctx.Data = &ExecuteCommandTemplate{ diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index 749564d4d..e7e64d52e 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -79,8 +79,8 @@ func TestProvisionerPrepare_Defaults(t *testing.T) { t.Error("expected elevated_password to be empty") } - if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` { - t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. 
{{.Vars}}; &'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) + if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` { + t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) } if p.config.ElevatedExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }"` { @@ -403,7 +403,7 @@ func TestProvisionerProvision_Inline(t *testing.T) { ui := testUi() p := new(Provisioner) - // Defaults provided by Packer - env vars should not appear in cmd + // Defaults provided by Packer p.config.PackerBuildName = "vmware" p.config.PackerBuilderType = "iso" comm := new(packer.MockCommunicator) @@ -413,14 +413,11 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - cmd := comm.StartCmd.Command - re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) - matched := re.MatchString(cmd) - if !matched { - t.Fatalf("Got unexpected command: %s", cmd) + expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) } - // User supplied env vars should not change things envVars := make([]string, 2) envVars[0] = "FOO=BAR" envVars[1] = "BAR=BAZ" @@ -433,11 +430,9 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - cmd = comm.StartCmd.Command - re = regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) - matched = re.MatchString(cmd) - if !matched { - t.Fatalf("Got unexpected command: %s", cmd) + expectedCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) } } @@ -460,12 +455,11 @@ func TestProvisionerProvision_Scripts(t *testing.T) { t.Fatal("should not have error") } - cmd := comm.StartCmd.Command - re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) - matched := re.MatchString(cmd) - if !matched { - t.Fatalf("Got unexpected command: %s", cmd) + expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) } + } func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { @@ -494,11 +488,9 @@ func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { t.Fatal("should not have error") } - cmd := comm.StartCmd.Command - re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) - matched := re.MatchString(cmd) - if !matched { - t.Fatalf("Got unexpected command: %s", cmd) + expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) } } @@ -555,11 +547,11 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO==bar"}, // User env var with value starting with equals } expected := []string{ - `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - `$env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, - `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:BAZ=\"qux\"; $env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:FOO=\"bar=baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:FOO=\"=bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, } p := new(Provisioner) @@ -579,6 +571,7 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { } func TestProvision_createCommandText(t *testing.T) { + config := 
testConfig() config["remote_path"] = "c:/Windows/Temp/script.ps1" p := new(Provisioner) @@ -593,46 +586,22 @@ func TestProvision_createCommandText(t *testing.T) { // Non-elevated cmd, _ := p.createCommandText() - re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. \${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) - matched := re.MatchString(cmd) - if !matched { - t.Fatalf("Got unexpected command: %s", cmd) + expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` + + if cmd != expectedCommand { + t.Fatalf("Expected Non-elevated command: %s, got %s", expectedCommand, cmd) } // Elevated p.config.ElevatedUser = "vagrant" p.config.ElevatedPassword = "vagrant" cmd, _ = p.createCommandText() - re = regexp.MustCompile(`powershell -executionpolicy bypass -file "%TEMP%\\packer-elevated-shell-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1"`) - matched = re.MatchString(cmd) + matched, _ := regexp.MatchString("powershell -executionpolicy bypass -file \"%TEMP%(.{1})packer-elevated-shell.*", cmd) if !matched { t.Fatalf("Got unexpected elevated command: %s", cmd) } } -func TestProvision_uploadEnvVars(t *testing.T) { - p := new(Provisioner) - comm := new(packer.MockCommunicator) - p.communicator = comm - - flattenedEnvVars := `$env:PACKER_BUILDER_TYPE="footype"; $env:PACKER_BUILD_NAME="foobuild";` - - envVarPath, err := p.uploadEnvVars(flattenedEnvVars) - if err != nil { - t.Fatalf("Did not expect error: %s", err.Error()) - } - - if comm.UploadCalled != true { - t.Fatalf("Failed to upload env var 
file") - } - - re := regexp.MustCompile(`\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1`) - matched := re.MatchString(envVarPath) - if !matched { - t.Fatalf("Got unexpected path for env var file: %s", envVarPath) - } -} - func TestProvision_generateElevatedShellRunner(t *testing.T) { // Non-elevated diff --git a/website/source/docs/provisioners/powershell.html.md b/website/source/docs/provisioners/powershell.html.md index a096bf2f5..f085a6e82 100644 --- a/website/source/docs/provisioners/powershell.html.md +++ b/website/source/docs/provisioners/powershell.html.md @@ -1,8 +1,8 @@ --- description: | - The shell Packer provisioner provisions machines built by Packer using - shell scripts. Shell provisioning is the easiest way to get software - installed and configured on a machine. + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. layout: docs page_title: 'PowerShell - Provisioners' sidebar_current: 'docs-provisioners-powershell' @@ -29,21 +29,20 @@ The example below is fully functional. ## Configuration Reference The reference of available configuration options is listed below. The only -required element is either "inline" or "script". Every other option is -optional. +required element is either "inline" or "script". Every other option is optional. Exactly *one* of the following is required: - `inline` (array of strings) - This is an array of commands to execute. The - commands are concatenated by newlines and turned into a single file, so - they are all executed within the same context. This allows you to change + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change directories in one command and use something in the directory in the next and so on. 
Inline scripts are the easiest way to pull off simple tasks within the machine. - `script` (string) - The path to a script to upload and execute in - the machine. This path can be absolute or relative. If it is relative, it - is relative to the working directory when Packer is executed. + the machine. This path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. - `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. Each script is @@ -52,12 +51,12 @@ Exactly *one* of the following is required: Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to Unix - line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -- `elevated_execute_command` (string) - The command to use to execute the - elevated script. By default this is as follows: +- `elevated_execute_command` (string) - The command to use to execute the elevated + script. By default this is as follows: ``` powershell powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" @@ -66,34 +65,32 @@ Optional parameters: The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the location of a temp file containing the list of - `environment_vars`, if configured. + `Vars`, which is the location of a temp file containing the list of `environment_vars`, if configured. 
- `environment_vars` (array of strings) - An array of key/value pairs to inject prior to the execute\_command. The format should be `key=value`. - Packer injects some environmental variables by default into the - environment, as well, which are covered in the section below. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. - `execute_command` (string) - The command to use to execute the script. By default this is as follows: ``` powershell - powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" + powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }" ``` The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the location of a temp file containing the list of - `environment_vars`, if configured. + `Vars`, which is the list of `environment_vars`, if configured. - `elevated_user` and `elevated_password` (string) - If specified, the PowerShell script will be run with elevated privileges using the given Windows user. - `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must - be a writable location and any parent directories must already exist. + the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must be a + writable location and any parent directories must already exist. - `start_retry_timeout` (string) - The amount of time to attempt to *start* the remote process. By default this is "5m" or 5 minutes. 
This setting @@ -114,10 +111,9 @@ commonly useful environmental variables: This is most useful when Packer is making multiple builds and you want to distinguish them slightly from a common provisioning script. -- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create - the machine that the script is running on. This is useful if you want to - run only certain parts of the script on systems built with certain - builders. +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the + machine that the script is running on. This is useful if you want to run + only certain parts of the script on systems built with certain builders. - `PACKER_HTTP_ADDR` If using a builder that provides an http server for file transfer (such as hyperv, parallels, qemu, virtualbox, and vmware), this From c132bd867e61e0983dbef1e9a8247991af1aa35b Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 2 Feb 2018 13:14:13 -0600 Subject: [PATCH 153/251] vagrant: Correct name of vim package The non-X11-linked version of vim recently changed name from vim-lite to vim-console, which was preventing bootstrap. --- scripts/vagrant-freebsd-priv-config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/vagrant-freebsd-priv-config.sh b/scripts/vagrant-freebsd-priv-config.sh index 30a2e8185..e2ea148dc 100755 --- a/scripts/vagrant-freebsd-priv-config.sh +++ b/scripts/vagrant-freebsd-priv-config.sh @@ -17,7 +17,7 @@ EOT pkg update pkg install -y \ - editors/vim-lite \ + editors/vim-console \ devel/git \ devel/gmake \ lang/go \ From 399431c00b502f633f34cdb0f7a2533b0bb9c80d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 2 Feb 2018 12:19:04 -0800 Subject: [PATCH 154/251] group oracle builders under a single heading. 
--- .../docs/builders/oracle-classic.html.md | 32 +++++++++---------- .../source/docs/builders/oracle-oci.html.md | 2 +- website/source/docs/builders/oracle.html.md | 22 +++++++++++++ website/source/layouts/docs.erb | 15 ++++++--- 4 files changed, 49 insertions(+), 22 deletions(-) create mode 100644 website/source/docs/builders/oracle.html.md diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index 0d9dc05b7..f8991b968 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -1,7 +1,7 @@ --- -description: +description: | The oracle-classic builder is able to create new custom images for use with Oracle - Compute Cloud. + Cloud Infrastructure Classic Compute. layout: docs page_title: 'Oracle Cloud Infrastructure Classic - Builders' sidebar_current: 'docs-builders-oracle-classic' @@ -25,7 +25,7 @@ to use it or delete it. ## Authorization -This builder authenticates API calls to Oracle Cloud Infrastructure Classic Compute using basic +This builder authenticates API calls to Oracle Cloud Infrastructure Classic Compute using basic authentication (user name and password). To read more, see the [authentication documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/Authentication.html) @@ -36,11 +36,11 @@ This builder currently only works with the SSH communicator. ### Required - - `api_endpoint` (string) - This is your custom API endpoint for sending - requests. Instructions for determining your API endpoint can be found + - `api_endpoint` (string) - This is your custom API endpoint for sending + requests. 
Instructions for determining your API endpoint can be found [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsa/SendRequests.html) - - `dest_image_list` (string) - Where to save the machine image to once you've + - `dest_image_list` (string) - Where to save the machine image to once you've provisioned it. If the provided image list does not exist, Packer will create it. - `identity_domain` (string) - This is your customer-specific identity domain @@ -50,7 +50,7 @@ This builder currently only works with the SSH communicator. - `source_image_list` (string) - This is what image you want to use as your base image. See the [documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/listing-machine-images.html) - for more details. You may use either a public image list, or a private image list. To see what public image lists are available, you can use + for more details. You may use either a public image list, or a private image list. To see what public image lists are available, you can use the CLI, as described [here](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stopc/image-lists-stclr-and-nmcli.html#GUID-DB7E75FE-F752-4FF7-AB70-3C8DCDFCA0FA) - `password` (string) - Your account password. @@ -63,21 +63,21 @@ This builder currently only works with the SSH communicator. ### Optional - - `image_description` (string) - a description for your destination - image list. If you don't provide one, Packer will provide a generic description. + - `image_description` (string) - a description for your destination + image list. If you don't provide one, Packer will provide a generic description. - - `ssh_username` (string) - The username that Packer will use to SSH into the - instance; defaults to `opc`, the default oracle user, which has sudo - priveliges. If you have already configured users on your machine, you may - prompt Packer to use one of those instead. 
For more detail, see the + - `ssh_username` (string) - The username that Packer will use to SSH into the + instance; defaults to `opc`, the default oracle user, which has sudo + priveliges. If you have already configured users on your machine, you may + prompt Packer to use one of those instead. For more detail, see the [documentation](https://docs.oracle.com/en/cloud/iaas/compute-iaas-cloud/stcsg/accessing-oracle-linux-instance-using-ssh.html). - `image_name` (string) - The name to assign to the resulting custom image. ## Basic Example -Here is a basic example. Note that account specific configuration has been -obfuscated; you will need to add a working `username`, `password`, +Here is a basic example. Note that account specific configuration has been +obfuscated; you will need to add a working `username`, `password`, `identity_domain`, and `api_endpoint` in order for the example to work. ``` {.json} @@ -90,7 +90,7 @@ obfuscated; you will need to add a working `username`, `password`, "identity_domain": "#######", "api_endpoint": "https://api-###.compute.###.oraclecloud.com/", "source_image_list": "/oracle/public/OL_7.2_UEKR4_x86_64", - "shape": "oc3", + "shape": "oc3", "image_name": "Packer_Builder_Test_{{timestamp}}" "dest_image_list": "Packer_Builder_Test_List" } diff --git a/website/source/docs/builders/oracle-oci.html.md b/website/source/docs/builders/oracle-oci.html.md index 3503d4fca..6271c5830 100644 --- a/website/source/docs/builders/oracle-oci.html.md +++ b/website/source/docs/builders/oracle-oci.html.md @@ -1,5 +1,5 @@ --- -description: +description: | The oracle-oci builder is able to create new custom images for use with Oracle Cloud Infrastructure (OCI). 
layout: docs diff --git a/website/source/docs/builders/oracle.html.md b/website/source/docs/builders/oracle.html.md new file mode 100644 index 000000000..70a1917d0 --- /dev/null +++ b/website/source/docs/builders/oracle.html.md @@ -0,0 +1,22 @@ +--- +description: | + Packer is able to create custom images using Oracle Cloud Infrastructure. +layout: docs +page_title: 'Oracle - Builders' +sidebar_current: 'docs-builders-oracle' +--- + +# Oracle Builder + +Packer is able to create custom images on both Oracle Cloud Infrastructure and +Oracle Cloud Infrastructure Classic Compute. Packer comes with builders +designed to support both platforms. Please choose the one that's right for you: + +- [oracle-classic](/docs/builders/oracle-classic.html) - Create custom images + in Oracle Cloud Infrastructure Classic Compute by launching a source + instance and creating an image list from a snapshot of it after + provisioning. + +- [oracle-oci](/docs/builders/amazon-instance.html) - Create custom images in + Oracle Cloud Infrastructure (OCI) by launching a base instance and creating + an image from it after provisioning. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 739b33733..8470e0d35 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -131,11 +131,16 @@ > OpenStack - > - Oracle Classic - - > - Oracle OCI + > + Oracle + > Parallels From c98a074f0dd14a646a3eb919397f38b2a8269ba5 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 18:58:42 -0600 Subject: [PATCH 155/251] Renamed common/config.go's SupportedURL to SupportedProtocol as suggested by @SwampDragons. 
--- common/config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/common/config.go b/common/config.go index 6850005d7..269d52dd1 100644 --- a/common/config.go +++ b/common/config.go @@ -44,9 +44,9 @@ func ChooseString(vals ...string) string { return "" } -// SupportedURL verifies that the url passed is actually supported or not +// SupportedProtocol verifies that the url passed is actually supported or not // This will also validate that the protocol is one that's actually implemented. -func SupportedURL(u *url.URL) bool { +func SupportedProtocol(u *url.URL) bool { // url.Parse shouldn't return nil except on error....but it can. if u == nil { return false @@ -159,7 +159,7 @@ func ValidatedURL(original string) (string, error) { } // We should now have a url, so verify that it's a protocol we support. - if !SupportedURL(u) { + if !SupportedProtocol(u) { return "", fmt.Errorf("Unsupported protocol scheme! (%#v)", u) } From 75d3ea7cee0ee2b7a740dcfd62f8a890a0c0dbc8 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 11 Nov 2015 06:39:06 -0600 Subject: [PATCH 156/251] Added support for sound, serial ports, parallel ports, usb, and specifying a default network to the vmware builder. builder/vmware/{iso,vmx}: Added the specific configuration options that get parsed. Normalize paths when pulling them from the json template so that they'll work on Windows too. Added some improved error checking when parsing these options. Stash the vm's network connection type so that other steps can figure out addressing information Modified the esx5 driver to support the new addressing logic. Modified the template in step_create_vmx to include the new options. builder/vmware/common: Implemented a parser for vmware's configuration files to the vmware builder. Modified the driver's interface to include support for resolving both guest/host hw and ip addresses Implemented a base structure with some methods that implement these features. 
Rewrote all ip and mac address dependent code to utilize these new methods. Removed host_ip and guest_ip due to their logic being moved directly into a base-structure used by each driver. The code was explicitly checking runtime.GOOS instead of portably using net.Interfaces() anyways. Updated driver_mock to support the new addressing methods --- builder/vmware/common/driver.go | 250 ++++- builder/vmware/common/driver_fusion5.go | 11 +- builder/vmware/common/driver_mock.go | 60 ++ builder/vmware/common/driver_parser.go | 974 ++++++++++++++++++ builder/vmware/common/driver_player5.go | 24 +- .../vmware/common/driver_player5_windows.go | 25 + builder/vmware/common/driver_workstation10.go | 1 + builder/vmware/common/driver_workstation9.go | 27 +- .../common/driver_workstation9_windows.go | 8 + .../vmware/common/driver_workstation_unix.go | 8 + builder/vmware/common/guest_ip.go | 90 -- builder/vmware/common/guest_ip_test.go | 82 -- builder/vmware/common/host_ip.go | 7 - .../vmware/common/host_ip_ifconfig_test.go | 11 - builder/vmware/common/host_ip_vmnetnatconf.go | 65 -- .../common/host_ip_vmnetnatconf_test.go | 11 - builder/vmware/common/ssh.go | 24 +- .../vmware/common/step_type_boot_command.go | 13 +- builder/vmware/iso/builder.go | 32 + builder/vmware/iso/driver_esx5.go | 11 +- builder/vmware/iso/step_create_vmx.go | 242 ++++- builder/vmware/vmx/step_clone_vmx.go | 10 + 22 files changed, 1648 insertions(+), 338 deletions(-) create mode 100644 builder/vmware/common/driver_parser.go delete mode 100644 builder/vmware/common/guest_ip.go delete mode 100644 builder/vmware/common/guest_ip_test.go delete mode 100644 builder/vmware/common/host_ip.go delete mode 100644 builder/vmware/common/host_ip_ifconfig_test.go delete mode 100644 builder/vmware/common/host_ip_vmnetnatconf.go delete mode 100644 builder/vmware/common/host_ip_vmnetnatconf_test.go diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index d5f8b44f4..1098e591f 100644 --- 
a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -1,14 +1,19 @@ package common import ( + "errors" "bytes" "fmt" "log" + "os" "os/exec" + "io/ioutil" "regexp" "runtime" "strconv" "strings" + "time" + "net" "github.com/hashicorp/packer/helper/multistep" ) @@ -29,10 +34,6 @@ type Driver interface { // Checks if the VMX file at the given path is running. IsRunning(string) (bool, error) - // CommHost returns the host address for the VM that is being - // managed by this driver. - CommHost(multistep.StateBag) (string, error) - // Start starts a VM specified by the path to the VMX given. Start(string, bool) error @@ -49,14 +50,32 @@ type Driver interface { // Attach the VMware tools ISO ToolsInstall() error - // Get the path to the DHCP leases file for the given device. - DhcpLeasesPath(string) string - // Verify checks to make sure that this driver should function // properly. This should check that all the files it will use // appear to exist and so on. If everything is okay, this doesn't - // return an error. Otherwise, this returns an error. + // return an error. Otherwise, this returns an error. Each vmware + // driver should assign the VmwareMachine callback functions for locating + // paths within this function. Verify() error + + /// This is to establish a connection to the guest + CommHost(multistep.StateBag) (string, error) + + /// These methods are generally implemented by the VmwareDriver + /// structure within this file. A driver implementation can + /// reimplement these, though, if it wants. 
+ + // Get the guest hw address for the vm + GuestAddress(multistep.StateBag) (string, error) + + // Get the guest ip address for the vm + GuestIP(multistep.StateBag) (string, error) + + // Get the host hw address for the vm + HostAddress(multistep.StateBag) (string, error) + + // Get the host ip address for the vm + HostIP(multistep.StateBag) (string, error) } // NewDriver returns a new driver implementation for this operating @@ -192,3 +211,218 @@ func compareVersions(versionFound string, versionWanted string, product string) return nil } + +// helper functions that read configuration information from a file +func readNetmapConfig(path string) (NetworkMap,error) { + fd,err := os.Open(path) + if err != nil { return nil, err } + defer fd.Close() + return ReadNetworkMap(fd) +} + +func readDhcpConfig(path string) (DhcpConfiguration,error) { + fd,err := os.Open(path) + if err != nil { return nil, err } + defer fd.Close() + return ReadDhcpConfiguration(fd) +} + +// This VmwareDriver is a base class that contains default methods +// that a Driver can use or implement themselves. +type VmwareDriver struct { + /// These methods define paths that are utilized by the driver + /// A driver must overload these in order to point to the correct + /// files so that the address detection (ip and ethernet) machinery + /// works. 
+ DhcpLeasesPath func(string) string + VmnetnatConfPath func() string + DhcpConfPath func() string + NetmapConfPath func() string +} + +func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string,error) { + vmxPath := state.Get("vmx_path").(string) + + log.Println("Lookup up IP information...") + f, err := os.Open(vmxPath) + if err != nil { + return "", err + } + defer f.Close() + + vmxBytes, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + vmxData := ParseVMX(string(vmxBytes)) + + var ok bool + macAddress := "" + if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { + if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { + return "", errors.New("couldn't find MAC address in VMX") + } + } + + res,err := net.ParseMAC(macAddress) + if err != nil { return "", err } + + return res.String(),nil +} + +func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string,error) { + + // read netmap config + pathNetmap := d.NetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + } + netmap,err := readNetmapConfig(pathNetmap) + if err != nil { return "",err } + + // convert the stashed network to a device + network := state.Get("vmnetwork").(string) + device,err := netmap.NameIntoDevice(network) + if err != nil { return "", err } + + // figure out our MAC address for looking up the guest address + MACAddress,err := d.GuestAddress(state) + if err != nil { return "", err } + + // figure out the correct dhcp leases + dhcpLeasesPath := d.DhcpLeasesPath(device) + log.Printf("DHCP leases path: %s", dhcpLeasesPath) + if dhcpLeasesPath == "" { + return "", errors.New("no DHCP leases path found.") + } + + // open up the lease and read its contents + fh, err := os.Open(dhcpLeasesPath) + if err != nil { + return "", err + } + defer fh.Close() + + dhcpBytes, err := ioutil.ReadAll(fh) + if err != nil { + return "", err + } + + 
// start grepping through the file looking for fields that we care about + var lastIp string + var lastLeaseEnd time.Time + + var curIp string + var curLeaseEnd time.Time + + ipLineRe := regexp.MustCompile(`^lease (.+?) {$`) + endTimeLineRe := regexp.MustCompile(`^\s*ends \d (.+?);$`) + macLineRe := regexp.MustCompile(`^\s*hardware ethernet (.+?);$`) + + for _, line := range strings.Split(string(dhcpBytes), "\n") { + // Need to trim off CR character when running in windows + line = strings.TrimRight(line, "\r") + + matches := ipLineRe.FindStringSubmatch(line) + if matches != nil { + lastIp = matches[1] + continue + } + + matches = endTimeLineRe.FindStringSubmatch(line) + if matches != nil { + lastLeaseEnd, _ = time.Parse("2006/01/02 15:04:05", matches[1]) + continue + } + + // If the mac address matches and this lease ends farther in the + // future than the last match we might have, then choose it. + matches = macLineRe.FindStringSubmatch(line) + if matches != nil && strings.EqualFold(matches[1], MACAddress) && curLeaseEnd.Before(lastLeaseEnd) { + curIp = lastIp + curLeaseEnd = lastLeaseEnd + } + } + if curIp == "" { + return "", fmt.Errorf("IP not found for MAC %s in DHCP leases at %s", MACAddress, dhcpLeasesPath) + } + return curIp, nil +} + +func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { + + // parse network<->device mapping + pathNetmap := d.NetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + } + netmap,err := readNetmapConfig(pathNetmap) + if err != nil { return "",err } + + // parse dhcpd configuration + pathDhcpConfig := d.DhcpConfPath() + if _, err := os.Stat(pathDhcpConfig); err != nil { + return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) + } + config,err := readDhcpConfig(pathDhcpConfig) + if err != nil { return "",err } + + // convert network to name + network := state.Get("vmnetwork").(string) + 
device,err := netmap.NameIntoDevice(network) + if err != nil { return "", err } + + // find the entry configured in the dhcpd + interfaceConfig,err := config.HostByName(device) + if err != nil { return "", err } + + // finally grab the hardware address + address,err := interfaceConfig.Hardware() + if err == nil { return address.String(), nil } + + // we didn't find it, so search through our interfaces for the device name + interfaceList,err := net.Interfaces() + if err == nil { return "", err } + + names := make([]string, 0) + for _,intf := range interfaceList { + if strings.HasSuffix( strings.ToLower(intf.Name), device ) { + return intf.HardwareAddr.String(),nil + } + names = append(names, intf.Name) + } + return "",fmt.Errorf("Unable to find device %s : %v", device, names) +} + +func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { + + // parse network<->device mapping + pathNetmap := d.NetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + } + netmap,err := readNetmapConfig(pathNetmap) + if err != nil { return "",err } + + // parse dhcpd configuration + pathDhcpConfig := d.DhcpConfPath() + if _, err := os.Stat(pathDhcpConfig); err != nil { + return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) + } + config,err := readDhcpConfig(pathDhcpConfig) + if err != nil { return "",err } + + // convert network to name + network := state.Get("vmnetwork").(string) + device,err := netmap.NameIntoDevice(network) + if err != nil { return "", err } + + // find the entry configured in the dhcpd + interfaceConfig,err := config.HostByName(device) + if err != nil { return "", err } + + address,err := interfaceConfig.IP4() + if err != nil { return "", err } + + return address.String(),nil +} diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index 9545d3e81..4bc5ae301 100644 --- 
a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -14,6 +14,8 @@ import ( // Fusion5Driver is a driver that can run VMware Fusion 5. type Fusion5Driver struct { + VmwareDriver + // This is the path to the "VMware Fusion.app" AppPath string @@ -139,6 +141,11 @@ func (d *Fusion5Driver) Verify() error { return err } + // default paths + d.VmwareDriver.DhcpLeasesPath = func(device string) string { + return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" + } + return nil } @@ -158,10 +165,6 @@ func (d *Fusion5Driver) ToolsInstall() error { return nil } -func (d *Fusion5Driver) DhcpLeasesPath(device string) string { - return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" -} - const fusionSuppressPlist = ` diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 9291be976..15e006403 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -34,6 +34,26 @@ type DriverMock struct { CommHostResult string CommHostErr error + HostAddressCalled bool + HostAddressState multistep.StateBag + HostAddressResult string + HostAddressErr error + + HostIPCalled bool + HostIPState multistep.StateBag + HostIPResult string + HostIPErr error + + GuestAddressCalled bool + GuestAddressState multistep.StateBag + GuestAddressResult string + GuestAddressErr error + + GuestIPCalled bool + GuestIPState multistep.StateBag + GuestIPResult string + GuestIPErr error + StartCalled bool StartPath string StartHeadless bool @@ -58,6 +78,12 @@ type DriverMock struct { DhcpLeasesPathDevice string DhcpLeasesPathResult string + NetmapPathCalled bool + NetmapPathResult string + + DhcpConfPathCalled bool + DhcpConfPathResult string + VerifyCalled bool VerifyErr error } @@ -98,6 +124,30 @@ func (d *DriverMock) CommHost(state multistep.StateBag) (string, error) { return d.CommHostResult, d.CommHostErr } +func (d *DriverMock) HostAddress(state multistep.StateBag) (string, error) { + 
d.HostAddressCalled = true + d.HostAddressState = state + return d.HostAddressResult, d.HostAddressErr +} + +func (d *DriverMock) HostIP(state multistep.StateBag) (string, error) { + d.HostIPCalled = true + d.HostIPState = state + return d.HostIPResult, d.HostIPErr +} + +func (d *DriverMock) GuestAddress(state multistep.StateBag) (string, error) { + d.GuestAddressCalled = true + d.GuestAddressState = state + return d.GuestAddressResult, d.GuestAddressErr +} + +func (d *DriverMock) GuestIP(state multistep.StateBag) (string, error) { + d.GuestIPCalled = true + d.GuestIPState = state + return d.GuestIPResult, d.GuestIPErr +} + func (d *DriverMock) Start(path string, headless bool) error { d.StartCalled = true d.StartPath = path @@ -134,6 +184,16 @@ func (d *DriverMock) DhcpLeasesPath(device string) string { return d.DhcpLeasesPathResult } +func (d *DriverMock) NetmapPath(device string) string { + d.NetmapPathCalled = true + return d.NetmapPathResult +} + +func (d *DriverMock) DhcpConfPath(device string) string { + d.DhcpConfPathCalled = true + return d.DhcpConfPathResult +} + func (d *DriverMock) Verify() error { d.VerifyCalled = true return d.VerifyErr diff --git a/builder/vmware/common/driver_parser.go b/builder/vmware/common/driver_parser.go new file mode 100644 index 000000000..38470eb5b --- /dev/null +++ b/builder/vmware/common/driver_parser.go @@ -0,0 +1,974 @@ +package common +import ( + "fmt" + "os" + "io" + "strings" + "strconv" + "net" + "sort" +) + +type sentinelSignaller chan struct{} + +/** low-level parsing */ +// strip the comments and extraneous newlines from a byte channel +func uncomment(eof sentinelSignaller, in <-chan byte) chan byte { + out := make(chan byte) + + go func(in <-chan byte, out chan byte) { + var endofline bool + for stillReading := true; stillReading; { + select { + case <-eof: + stillReading = false + case ch := <-in: + switch ch { + case '#': + endofline = true + case '\n': + if endofline { endofline = false } + } + if !endofline { 
out <- ch } + } + } + }(in, out) + return out +} + +// convert a byte channel into a channel of pseudo-tokens +func tokenizeDhcpConfig(eof sentinelSignaller, in chan byte) chan string { + var ch byte + var state string + var quote bool + + out := make(chan string) + go func(out chan string) { + for stillReading := true; stillReading; { + select { + case <-eof: + stillReading = false + + case ch = <-in: + if quote { + if ch == '"' { + out <- state + string(ch) + state,quote = "",false + continue + } + state += string(ch) + continue + } + + switch ch { + case '"': + quote = true + state += string(ch) + continue + + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + if len(state) == 0 { continue } + out <- state + state = "" + + case '{': fallthrough + case '}': fallthrough + case ';': + if len(state) > 0 { out <- state } + out <- string(ch) + state = "" + + default: + state += string(ch) + } + } + } + if len(state) > 0 { out <- state } + }(out) + return out +} + +/** mid-level parsing */ +type tkParameter struct { + name string + operand []string +} +func (e *tkParameter) String() string { + var values []string + for _,val := range e.operand { + values = append(values, val) + } + return fmt.Sprintf("%s [%s]", e.name, strings.Join(values, ",")) +} + +type tkGroup struct { + parent *tkGroup + id tkParameter + + groups []*tkGroup + params []tkParameter +} +func (e *tkGroup) String() string { + var id []string + + id = append(id, e.id.name) + for _,val := range e.id.operand { + id = append(id, val) + } + + var config []string + for _,val := range e.params { + config = append(config, val.String()) + } + return fmt.Sprintf("%s {\n%s\n}", strings.Join(id, " "), strings.Join(config, "\n")) +} + +// convert a channel of pseudo-tokens into an tkParameter struct +func parseTokenParameter(in chan string) tkParameter { + var result tkParameter + for { + token := <-in + if result.name == "" { + result.name = token + continue + } + switch 
token { + case "{": fallthrough + case "}": fallthrough + case ";": goto leave + default: + result.operand = append(result.operand, token) + } + } +leave: + return result +} + +// convert a channel of pseudo-tokens into an tkGroup tree */ +func parseDhcpConfig(eof sentinelSignaller, in chan string) (tkGroup,error) { + var tokens []string + var result tkGroup + + toParameter := func(tokens []string) tkParameter { + out := make(chan string) + go func(out chan string){ + for _,v := range tokens { out <- v } + out <- ";" + }(out) + return parseTokenParameter(out) + } + + for stillReading,currentgroup := true,&result; stillReading; { + select { + case <-eof: + stillReading = false + + case tk := <-in: + switch tk { + case "{": + grp := &tkGroup{parent:currentgroup} + grp.id = toParameter(tokens) + currentgroup.groups = append(currentgroup.groups, grp) + currentgroup = grp + case "}": + if currentgroup.parent == nil { + return tkGroup{}, fmt.Errorf("Unable to close the global declaration") + } + if len(tokens) > 0 { + return tkGroup{}, fmt.Errorf("List of tokens was left unterminated : %v", tokens) + } + currentgroup = currentgroup.parent + case ";": + arg := toParameter(tokens) + currentgroup.params = append(currentgroup.params, arg) + default: + tokens = append(tokens, tk) + continue + } + tokens = []string{} + } + } + return result,nil +} + +func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) chan string { + var ch byte + var state string + var quote bool + var lastnewline bool + + out := make(chan string) + go func(out chan string) { + for stillReading := true; stillReading; { + select { + case <-eof: + stillReading = false + + case ch = <-in: + if quote { + if ch == '"' { + out <- state + string(ch) + state,quote = "",false + continue + } + state += string(ch) + continue + } + + switch ch { + case '"': + quote = true + state += string(ch) + continue + + case '\r': + fallthrough + case '\t': + fallthrough + case ' ': + if len(state) == 0 { continue } + 
out <- state + state = "" + + case '\n': + if lastnewline { continue } + if len(state) > 0 { out <- state } + out <- string(ch) + state = "" + lastnewline = true + continue + + case '.': fallthrough + case '=': + if len(state) > 0 { out <- state } + out <- string(ch) + state = "" + + default: + state += string(ch) + } + lastnewline = false + } + } + if len(state) > 0 { out <- state } + }(out) + return out +} + +func parseNetworkMapConfig(eof sentinelSignaller, in chan string) (NetworkMap,error) { + var unsorted map[string]map[string]string + var state []string + + addResult := func(network string, attribute string, value string) error { + _,ok := unsorted[network] + if !ok { unsorted[network] = make(map[string]string) } + + val,err := strconv.Unquote(value) + if err != nil { return err } + + current := unsorted[network] + current[attribute] = val + return nil + } + + stillReading := true + for unsorted = make(map[string]map[string]string); stillReading; { + select { + case <-eof: + if len(state) == 3 { + err := addResult(state[0], state[1], state[2]) + if err != nil { return nil,err } + } + stillReading = false + case tk := <-in: + switch tk { + case ".": + if len(state) != 1 { return nil,fmt.Errorf("Missing network index") } + case "=": + if len(state) != 2 { return nil,fmt.Errorf("Assignment to empty attribute") } + case "\n": + if len(state) == 0 { continue } + if len(state) != 3 { return nil,fmt.Errorf("Invalid attribute assignment : %v", state) } + err := addResult(state[0], state[1], state[2]) + if err != nil { return nil,err } + state = make([]string, 0) + default: + state = append(state, tk) + } + } + } + result := make([]map[string]string, 0) + var keys []string + for k := range unsorted { keys = append(keys, k) } + sort.Strings(keys) + for _,k := range keys { + result = append(result, unsorted[k]) + } + return result,nil +} + +/** higher-level parsing */ +/// parameters +type pParameter interface { repr() string } + +type pParameterInclude struct { + 
filename string +} +func (e pParameterInclude) repr() string { return fmt.Sprintf("include-file:filename=%s",e.filename) } + +type pParameterOption struct { + name string + value string +} +func (e pParameterOption) repr() string { return fmt.Sprintf("option:%s=%s",e.name,e.value) } + +// allow some-kind-of-something +type pParameterGrant struct { + verb string // allow,deny,ignore + attribute string +} +func (e pParameterGrant) repr() string { return fmt.Sprintf("grant:%s,%s",e.verb,e.attribute) } + +type pParameterAddress4 []string +func (e pParameterAddress4) repr() string { + return fmt.Sprintf("fixed-address4:%s",strings.Join(e,",")) +} + +type pParameterAddress6 []string +func (e pParameterAddress6) repr() string { + return fmt.Sprintf("fixed-address6:%s",strings.Join(e,",")) +} + +// hardware address 00:00:00:00:00:00 +type pParameterHardware struct { + class string + address []byte +} +func (e pParameterHardware) repr() string { + res := make([]string, 0) + for _,v := range e.address { + res = append(res, fmt.Sprintf("%02x",v)) + } + return fmt.Sprintf("hardware-address:%s[%s]",e.class,strings.Join(res,":")) +} + +type pParameterBoolean struct { + parameter string + truancy bool +} +func (e pParameterBoolean) repr() string { return fmt.Sprintf("boolean:%s=%s",e.parameter,e.truancy) } + +type pParameterClientMatch struct { + name string + data string +} +func (e pParameterClientMatch) repr() string { return fmt.Sprintf("match-client:%s=%s",e.name,e.data) } + +// range 127.0.0.1 127.0.0.255 +type pParameterRange4 struct { + min net.IP + max net.IP +} +func (e pParameterRange4) repr() string { return fmt.Sprintf("range4:%s-%s",e.min.String(),e.max.String()) } + +type pParameterRange6 struct { + min net.IP + max net.IP +} +func (e pParameterRange6) repr() string { return fmt.Sprintf("range6:%s-%s",e.min.String(),e.max.String()) } + +type pParameterPrefix6 struct { + min net.IP + max net.IP + bits int +} +func (e pParameterPrefix6) repr() string { return 
fmt.Sprintf("prefix6:/%d:%s-%s",e.bits,e.min.String(),e.max.String()) } + +// some-kind-of-parameter 1024 +type pParameterOther struct { + parameter string + value string +} +func (e pParameterOther) repr() string { return fmt.Sprintf("parameter:%s=%s",e.parameter,e.value) } + +type pParameterExpression struct { + parameter string + expression string +} +func (e pParameterExpression) repr() string { return fmt.Sprintf("parameter-expression:%s=\"%s\"",e.parameter,e.expression) } + +type pDeclarationIdentifier interface { repr() string } + +type pDeclaration struct { + id pDeclarationIdentifier + parent *pDeclaration + parameters []pParameter + declarations []pDeclaration +} + +func (e *pDeclaration) short() string { + return e.id.repr() +} + +func (e *pDeclaration) repr() string { + res := e.short() + + var parameters []string + for _,v := range e.parameters { + parameters = append(parameters, v.repr()) + } + + var groups []string + for _,v := range e.declarations { + groups = append(groups, fmt.Sprintf("-> %s",v.short())) + } + + if e.parent != nil { + res = fmt.Sprintf("%s parent:%s",res,e.parent.short()) + } + return fmt.Sprintf("%s\n%s\n%s\n", res, strings.Join(parameters,"\n"), strings.Join(groups,"\n")) +} + +type pDeclarationGlobal struct {} +func (e pDeclarationGlobal) repr() string { return fmt.Sprintf("{global}") } + +type pDeclarationShared struct { name string } +func (e pDeclarationShared) repr() string { return fmt.Sprintf("{shared-network %s}", e.name) } + +type pDeclarationSubnet4 struct { net.IPNet } +func (e pDeclarationSubnet4) repr() string { return fmt.Sprintf("{subnet4 %s}", e.String()) } + +type pDeclarationSubnet6 struct { net.IPNet } +func (e pDeclarationSubnet6) repr() string { return fmt.Sprintf("{subnet6 %s}", e.String()) } + +type pDeclarationHost struct { name string } +func (e pDeclarationHost) repr() string { return fmt.Sprintf("{host name:%s}", e.name) } + +type pDeclarationPool struct {} +func (e pDeclarationPool) repr() string { 
return fmt.Sprintf("{pool}") } + +type pDeclarationGroup struct {} +func (e pDeclarationGroup) repr() string { return fmt.Sprintf("{group}") } + +type pDeclarationClass struct { name string } +func (e pDeclarationClass) repr() string { return fmt.Sprintf("{class}") } + +/** parsers */ +func parseParameter(val tkParameter) (pParameter,error) { + switch val.name { + case "include": + if len(val.operand) != 2 { + return nil,fmt.Errorf("Invalid number of parameters for pParameterInclude : %v",val.operand) + } + name := val.operand[0] + return pParameterInclude{filename: name},nil + + case "option": + if len(val.operand) != 2 { + return nil,fmt.Errorf("Invalid number of parameters for pParameterOption : %v",val.operand) + } + name, value := val.operand[0], val.operand[1] + return pParameterOption{name: name, value: value},nil + + case "allow": fallthrough + case "deny": fallthrough + case "ignore": + if len(val.operand) < 1 { + return nil,fmt.Errorf("Invalid number of parameters for pParameterGrant : %v",val.operand) + } + attribute := strings.Join(val.operand," ") + return pParameterGrant{verb: strings.ToLower(val.name), attribute: attribute},nil + + case "range": + if len(val.operand) < 1 { + return nil,fmt.Errorf("Invalid number of parameters for pParameterRange4 : %v",val.operand) + } + idxAddress := map[bool]int{true:1,false:0}[strings.ToLower(val.operand[0]) == "bootp"] + if len(val.operand) > 2 + idxAddress { + return nil,fmt.Errorf("Invalid number of parameters for pParameterRange : %v",val.operand) + } + if idxAddress + 1 > len(val.operand) { + res := net.ParseIP(val.operand[idxAddress]) + return pParameterRange4{min: res, max: res},nil + } + addr1 := net.ParseIP(val.operand[idxAddress]) + addr2 := net.ParseIP(val.operand[idxAddress+1]) + return pParameterRange4{min: addr1, max: addr2},nil + + case "range6": + if len(val.operand) == 1 { + address := val.operand[0] + if (strings.Contains(address, "/")) { + cidr := strings.SplitN(address, "/", 2) + if len(cidr) 
!= 2 { return nil,fmt.Errorf("Unknown ipv6 format : %v", address) } + address := net.ParseIP(cidr[0]) + bits,err := strconv.Atoi(cidr[1]) + if err != nil { return nil,err } + mask := net.CIDRMask(bits, net.IPv6len*8) + + // figure out the network address + network := address.Mask(mask) + + // make a broadcast address + broadcast := network + networkSize,totalSize := mask.Size() + hostSize := totalSize-networkSize + for i := networkSize / 8; i < totalSize / 8; i++ { + broadcast[i] = byte(0xff) + } + octetIndex := network[networkSize / 8] + bitsLeft := (uint32)(hostSize%8) + broadcast[octetIndex] = network[octetIndex] | ((1< 1 { + if val.operand[0] == "=" { + return pParameterExpression{parameter: val.name, expression: strings.Join(val.operand[1:],"")},nil + } + } + if length != 1 { + return nil,fmt.Errorf("Invalid number of parameters for pParameterOther : %v",val.operand) + } + if strings.ToLower(val.name) == "not" { + return pParameterBoolean{parameter: val.operand[0], truancy: false},nil + } + return pParameterOther{parameter: val.name, value: val.operand[0]}, nil + } +} + +func parseTokenGroup(val tkGroup) (*pDeclaration,error) { + params := val.id.operand + switch val.id.name { + case "group": + return &pDeclaration{id:pDeclarationGroup{}},nil + + case "pool": + return &pDeclaration{id:pDeclarationPool{}},nil + + case "host": + if len(params) == 1 { + return &pDeclaration{id:pDeclarationHost{name: params[0]}},nil + } + + case "subnet": + if len(params) == 3 && strings.ToLower(params[1]) == "netmask" { + addr := make([]byte, 4) + for i,v := range strings.SplitN(params[2], ".", 4) { + res,err := strconv.ParseInt(v, 10, 0) + if err != nil { return nil,err } + addr[i] = byte(res) + } + oc1,oc2,oc3,oc4 := addr[0],addr[1],addr[2],addr[3] + if subnet,mask := net.ParseIP(params[0]),net.IPv4Mask(oc1,oc2,oc3,oc4); subnet != nil && mask != nil { + return &pDeclaration{id:pDeclarationSubnet4{net.IPNet{IP:subnet,Mask:mask}}},nil + } + } + case "subnet6": + if len(params) == 
1 { + ip6 := strings.SplitN(params[0], "/", 2) + if len(ip6) == 2 && strings.Contains(ip6[0], ":") { + address := net.ParseIP(ip6[0]) + prefix,err := strconv.Atoi(ip6[1]) + if err != nil { return nil, err } + return &pDeclaration{id:pDeclarationSubnet6{net.IPNet{IP:address,Mask:net.CIDRMask(prefix, net.IPv6len*8)}}},nil + } + } + case "shared-network": + if len(params) == 1 { + return &pDeclaration{id:pDeclarationShared{name: params[0]}},nil + } + case "": + return &pDeclaration{id:pDeclarationGlobal{}},nil + } + return nil,fmt.Errorf("Invalid pDeclaration : %v : %v", val.id.name, params) +} + +func flattenDhcpConfig(root tkGroup) (*pDeclaration,error) { + var result *pDeclaration + result,err := parseTokenGroup(root) + if err != nil { return nil,err } + + for _,p := range root.params { + param,err := parseParameter(p) + if err != nil { return nil,err } + result.parameters = append(result.parameters, param) + } + for _,p := range root.groups { + group,err := flattenDhcpConfig(*p) + if err != nil { return nil,err } + group.parent = result + result.declarations = append(result.declarations, *group) + } + return result,nil +} + +/** reduce the tree into the things that we care about */ +type grant uint +const ( + ALLOW grant = iota + IGNORE grant = iota + DENY grant = iota +) +type configDeclaration struct { + id []pDeclarationIdentifier + composites []pDeclaration + + address []pParameter + + options map[string]string + grants map[string]grant + attributes map[string]bool + parameters map[string]string + expressions map[string]string + + hostid []pParameterClientMatch +} + +func createDeclaration(node pDeclaration) configDeclaration { + var hierarchy []pDeclaration + + for n := &node; n != nil; n = n.parent { + hierarchy = append(hierarchy, *n) + } + + var result configDeclaration + result.address = make([]pParameter, 0) + + result.options = make(map[string]string) + result.grants = make(map[string]grant) + result.attributes = make(map[string]bool) + 
result.parameters = make(map[string]string) + result.expressions = make(map[string]string) + + result.hostid = make([]pParameterClientMatch, 0) + + // walk from globals to pDeclaration collecting all parameters + for i := len(hierarchy)-1; i >= 0; i-- { + result.composites = append(result.composites, hierarchy[(len(hierarchy)-1) - i]) + result.id = append(result.id, hierarchy[(len(hierarchy)-1) - i].id) + + // update configDeclaration parameters + for _,p := range hierarchy[i].parameters { + switch p.(type) { + case pParameterOption: + result.options[p.(pParameterOption).name] = p.(pParameterOption).value + case pParameterGrant: + Grant := map[string]grant{"ignore":IGNORE, "allow":ALLOW, "deny":DENY} + result.grants[p.(pParameterGrant).attribute] = Grant[p.(pParameterGrant).verb] + case pParameterBoolean: + result.attributes[p.(pParameterBoolean).parameter] = p.(pParameterBoolean).truancy + case pParameterClientMatch: + result.hostid = append(result.hostid, p.(pParameterClientMatch)) + case pParameterExpression: + result.expressions[p.(pParameterExpression).parameter] = p.(pParameterExpression).expression + case pParameterOther: + result.parameters[p.(pParameterOther).parameter] = p.(pParameterOther).value + default: + result.address = append(result.address, p) + } + } + } + return result +} + +func (e *configDeclaration) repr() string { + var result []string + + var res []string + + res = make([]string, 0) + for _,v := range e.id { res = append(res, v.repr()) } + result = append(result, strings.Join(res, ",")) + + if len(e.address) > 0 { + res = make([]string, 0) + for _,v := range e.address { res = append(res, v.repr()) } + result = append(result, fmt.Sprintf("address : %v", strings.Join(res, ","))) + } + + if len(e.options) > 0 { result = append(result, fmt.Sprintf("options : %v", e.options)) } + if len(e.grants) > 0 { result = append(result, fmt.Sprintf("grants : %v", e.grants)) } + if len(e.attributes) > 0 { result = append(result, fmt.Sprintf("attributes : 
%v", e.attributes)) } + if len(e.parameters) > 0 { result = append(result, fmt.Sprintf("parameters : %v", e.parameters)) } + if len(e.expressions) > 0 { result = append(result, fmt.Sprintf("parameter-expressions : %v", e.expressions)) } + + if len(e.hostid) > 0 { + res = make([]string, 0) + for _,v := range e.hostid { res = append(res, v.repr()) } + result = append(result, fmt.Sprintf("hostid : %v", strings.Join(res, " "))) + } + return strings.Join(result, "\n") + "\n" +} + +func (e *configDeclaration) IP4() (net.IP,error) { + var result []string + for _,entry := range e.address { + switch entry.(type) { + case pParameterAddress4: + for _,s := range entry.(pParameterAddress4) { + result = append(result, s) + } + } + } + if len(result) > 1 { + return nil,fmt.Errorf("More than one address4 returned : %v", result) + } else if len(result) == 0 { + return nil,fmt.Errorf("No IP4 addresses found") + } + + if res := net.ParseIP(result[0]); res != nil { return res,nil } + res,err := net.ResolveIPAddr("ip4", result[0]) + if err != nil { return nil,err } + return res.IP,nil +} +func (e *configDeclaration) IP6() (net.IP,error) { + var result []string + for _,entry := range e.address { + switch entry.(type) { + case pParameterAddress6: + for _,s := range entry.(pParameterAddress6) { + result = append(result, s) + } + } + } + if len(result) > 1 { + return nil,fmt.Errorf("More than one address6 returned : %v", result) + } else if len(result) == 0 { + return nil,fmt.Errorf("No IP6 addresses found") + } + + if res := net.ParseIP(result[0]); res != nil { return res,nil } + res,err := net.ResolveIPAddr("ip6", result[0]) + if err != nil { return nil,err } + return res.IP,nil +} +func (e *configDeclaration) Hardware() (net.HardwareAddr,error) { + var result []pParameterHardware + for _,addr := range e.address { + switch addr.(type) { + case pParameterHardware: + result = append(result, addr.(pParameterHardware)) + } + } + if len(result) > 0 { + return nil,fmt.Errorf("More than one 
hardware address returned : %v", result) + } + res := make(net.HardwareAddr, 0) + for _,by := range result[0].address { + res = append(res, by) + } + return res,nil +} + +/*** Dhcp Configuration */ +type DhcpConfiguration []configDeclaration +func ReadDhcpConfiguration(fd *os.File) (DhcpConfiguration,error) { + fromfile,eof := consumeFile(fd) + uncommented := uncomment(eof, fromfile) + tokenized := tokenizeDhcpConfig(eof, uncommented) + parsetree,err := parseDhcpConfig(eof, tokenized) + if err != nil { return nil,err } + + global,err := flattenDhcpConfig(parsetree) + if err != nil { return nil,err } + + var walkDeclarations func(root pDeclaration, out chan*configDeclaration); + walkDeclarations = func(root pDeclaration, out chan*configDeclaration) { + res := createDeclaration(root) + out <- &res + for _,p := range root.declarations { + walkDeclarations(p, out) + } + } + + each := make(chan*configDeclaration) + go func(out chan*configDeclaration) { + walkDeclarations(*global, out) + out <- nil + }(each) + + var result DhcpConfiguration + for decl := <-each; decl != nil; decl = <-each { + result = append(result, *decl) + } + return result,nil +} + +func (e *DhcpConfiguration) Global() configDeclaration { + result := (*e)[0] + if len(result.id) != 1 { + panic(fmt.Errorf("Something that can't happen happened")) + } + return result +} + +func (e *DhcpConfiguration) SubnetByAddress(address net.IP) (configDeclaration,error) { + var result []configDeclaration + for _,entry := range *e { + switch entry.id[0].(type) { + case pDeclarationSubnet4: + id := entry.id[0].(pDeclarationSubnet4) + if id.Contains(address) { + result = append(result, entry) + } + case pDeclarationSubnet6: + id := entry.id[0].(pDeclarationSubnet6) + if id.Contains(address) { + result = append(result, entry) + } + } + } + if len(result) == 0 { + return configDeclaration{},fmt.Errorf("No network declarations containing %s found", address.String()) + } + if len(result) > 1 { + return 
configDeclaration{},fmt.Errorf("More than 1 network declaration found : %v", result) + } + return result[0],nil +} + +func (e *DhcpConfiguration) HostByName(host string) (configDeclaration,error) { + var result []configDeclaration + for _,entry := range *e { + switch entry.id[0].(type) { + case pDeclarationHost: + id := entry.id[0].(pDeclarationHost) + if strings.ToLower(id.name) == strings.ToLower(host) { + result = append(result, entry) + } + } + } + if len(result) == 0 { + return configDeclaration{},fmt.Errorf("No host declarations containing %s found", host) + } + if len(result) > 1 { + return configDeclaration{},fmt.Errorf("More than 1 host declaration found : %v", result) + } + return result[0],nil +} + +/*** Network Map */ +type NetworkMap []map[string]string +func ReadNetworkMap(fd *os.File) (NetworkMap,error) { + + fromfile,eof := consumeFile(fd) + uncommented := uncomment(eof,fromfile) + tokenized := tokenizeNetworkMapConfig(eof, uncommented) + + result,err := parseNetworkMapConfig(eof, tokenized) + if err != nil { return nil,err } + return result,nil +} + +func (e *NetworkMap) NameIntoDevice(name string) (string,error) { + for _,val := range *e { + if strings.ToLower(val["name"]) == strings.ToLower(name) { + return val["device"],nil + } + } + return "",fmt.Errorf("Network name not found : %v", name) +} +func (e *NetworkMap) DeviceIntoName(device string) (string,error) { + for _,val := range *e { + if strings.ToLower(val["device"]) == strings.ToLower(device) { + return val["name"],nil + } + } + return "",fmt.Errorf("Device name not found : %v", device) +} +func (e *NetworkMap) repr() string { + var result []string + for idx,val := range *e { + result = append(result, fmt.Sprintf("network%d.name = \"%s\"", idx, val["name"])) + result = append(result, fmt.Sprintf("network%d.device = \"%s\"", idx, val["device"])) + } + return strings.Join(result, "\n") +} + +/** main */ +func consumeFile(fd *os.File) (chan byte,sentinelSignaller) { + fromfile := make(chan 
byte) + eof := make(sentinelSignaller) + go func() { + b := make([]byte, 1) + for { + _,err := fd.Read(b) + if err == io.EOF { break } + fromfile <- b[0] + } + close(eof) + }() + return fromfile,eof +} diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 311e5019c..9dce9540c 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -14,6 +14,8 @@ import ( // Player5Driver is a driver that can run VMware Player 5 on Linux. type Player5Driver struct { + VmwareDriver + AppPath string VdiskManagerPath string QemuImgPath string @@ -181,6 +183,20 @@ func (d *Player5Driver) Verify() error { "One of these is required to configure disks for VMware Player.") } + // Assigning the path callbacks to VmwareDriver + d.VmwareDriver.DhcpLeasesPath = func(device string) string { + return playerDhcpLeasesPath(device) + } + d.VmwareDriver.VmnetnatConfPath = func() string { + return playerVmnetnatConfPath() + } + d.VmwareDriver.DhcpConfPath = func() string { + return playerVmDhcpConfPath() + } + d.VmwareDriver.NetmapConfPath = func() string { + return playerNetmapConfPath() + } + return nil } @@ -191,11 +207,3 @@ func (d *Player5Driver) ToolsIsoPath(flavor string) string { func (d *Player5Driver) ToolsInstall() error { return nil } - -func (d *Player5Driver) DhcpLeasesPath(device string) string { - return playerDhcpLeasesPath(device) -} - -func (d *Player5Driver) VmnetnatConfPath() string { - return playerVmnetnatConfPath() -} diff --git a/builder/vmware/common/driver_player5_windows.go b/builder/vmware/common/driver_player5_windows.go index e16d275db..6ca065792 100644 --- a/builder/vmware/common/driver_player5_windows.go +++ b/builder/vmware/common/driver_player5_windows.go @@ -65,6 +65,20 @@ func playerVmnetnatConfPath() string { return findFile("vmnetnat.conf", playerDataFilePaths()) } +func playerVmDhcpConfPath() string { + path, err := playerDhcpConfigPathRegistry() + if err != nil { + 
log.Printf("Error finding configuration in registry: %s", err) + } else if _, err := os.Stat(path); err == nil { + return path + } + return findFile("vmnetdhcp.conf", playerDataFilePaths()) +} + +func playerNetmapConfPath() string { + return findFile("netmap.conf", playerDataFilePaths()) +} + // This reads the VMware installation path from the Windows registry. func playerVMwareRoot() (s string, err error) { key := `SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\vmplayer.exe` @@ -87,7 +101,18 @@ func playerDhcpLeasesPathRegistry() (s string, err error) { log.Printf(`Unable to read registry key %s\%s`, key, subkey) return } + return normalizePath(s), nil +} +// This reads the VMware DHCP configuration path from the Windows registry. +func playerDhcpConfigPathRegistry() (s string, err error) { + key := "SYSTEM\\CurrentControlSet\\services\\VMnetDHCP\\Parameters" + subkey := "ConfFile" + s, err = readRegString(syscall.HKEY_LOCAL_MACHINE, key, subkey) + if err != nil { + log.Printf(`Unable to read registry key %s\%s`, key, subkey) + return + } return normalizePath(s), nil } diff --git a/builder/vmware/common/driver_workstation10.go b/builder/vmware/common/driver_workstation10.go index 7e6ac8b8f..716717e39 100644 --- a/builder/vmware/common/driver_workstation10.go +++ b/builder/vmware/common/driver_workstation10.go @@ -33,3 +33,4 @@ func (d *Workstation10Driver) Verify() error { return workstationVerifyVersion(VMWARE_WS_VERSION) } + diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index d870a4cda..0c463a249 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -14,6 +14,8 @@ import ( // Workstation9Driver is a driver that can run VMware Workstation 9 type Workstation9Driver struct { + VmwareDriver + AppPath string VdiskManagerPath string VmrunPath string @@ -142,6 +144,23 @@ func (d *Workstation9Driver) Verify() error { return err } + // Assigning the 
path callbacks to VmwareDriver + d.VmwareDriver.DhcpLeasesPath = func(device string) string { + return workstationDhcpLeasesPath(device) + } + + d.VmwareDriver.VmnetnatConfPath = func() string { + return workstationVmnetnatConfPath() + } + + d.VmwareDriver.DhcpConfPath = func() string { + return workstationDhcpConfPath() + } + + d.VmwareDriver.NetmapConfPath = func() string { + return workstationNetmapConfPath() + } + return nil } @@ -152,11 +171,3 @@ func (d *Workstation9Driver) ToolsIsoPath(flavor string) string { func (d *Workstation9Driver) ToolsInstall() error { return nil } - -func (d *Workstation9Driver) DhcpLeasesPath(device string) string { - return workstationDhcpLeasesPath(device) -} - -func (d *Workstation9Driver) VmnetnatConfPath() string { - return workstationVmnetnatConfPath() -} diff --git a/builder/vmware/common/driver_workstation9_windows.go b/builder/vmware/common/driver_workstation9_windows.go index 17095badc..e69d5069f 100644 --- a/builder/vmware/common/driver_workstation9_windows.go +++ b/builder/vmware/common/driver_workstation9_windows.go @@ -63,6 +63,14 @@ func workstationVmnetnatConfPath() string { return findFile("vmnetnat.conf", workstationDataFilePaths()) } +func workstationNetmapConfPath() string { + return findFile("netmap.conf", workstationDataFilePaths()) +} + +func workstationDhcpConfPath() string { + return findFile("vmnetdhcp.conf", workstationDataFilePaths()) +} + // See http://blog.natefinch.com/2012/11/go-win-stuff.html // // This is used by workstationVMwareRoot in order to read some registry data. 
diff --git a/builder/vmware/common/driver_workstation_unix.go b/builder/vmware/common/driver_workstation_unix.go index 2931396a4..2ff94fd37 100644 --- a/builder/vmware/common/driver_workstation_unix.go +++ b/builder/vmware/common/driver_workstation_unix.go @@ -51,6 +51,14 @@ func workstationVmnetnatConfPath() string { return "" } +func workstationNetmapConfPath(device string) string { + return "" // FIXME +} + +func workstationDhcpConfPath(device string) string { + return "/etc/vmware/" + device + "/dhcpd/dhcpd.conf" +} + func workstationVerifyVersion(version string) error { if runtime.GOOS != "linux" { return fmt.Errorf("The VMware WS version %s driver is only supported on Linux, and Windows, at the moment. Your OS: %s", version, runtime.GOOS) diff --git a/builder/vmware/common/guest_ip.go b/builder/vmware/common/guest_ip.go deleted file mode 100644 index 25ca3b795..000000000 --- a/builder/vmware/common/guest_ip.go +++ /dev/null @@ -1,90 +0,0 @@ -package common - -import ( - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "regexp" - "strings" - "time" -) - -// Interface to help find the IP address of a running virtual machine. -type GuestIPFinder interface { - GuestIP() (string, error) -} - -// DHCPLeaseGuestLookup looks up the IP address of a guest using DHCP -// lease information from the VMware network devices. -type DHCPLeaseGuestLookup struct { - // Driver that is being used (to find leases path) - Driver Driver - - // Device that the guest is connected to. - Device string - - // MAC address of the guest. 
- MACAddress string -} - -func (f *DHCPLeaseGuestLookup) GuestIP() (string, error) { - dhcpLeasesPath := f.Driver.DhcpLeasesPath(f.Device) - log.Printf("DHCP leases path: %s", dhcpLeasesPath) - if dhcpLeasesPath == "" { - return "", errors.New("no DHCP leases path found.") - } - - fh, err := os.Open(dhcpLeasesPath) - if err != nil { - return "", err - } - defer fh.Close() - - dhcpBytes, err := ioutil.ReadAll(fh) - if err != nil { - return "", err - } - - var lastIp string - var lastLeaseEnd time.Time - - var curIp string - var curLeaseEnd time.Time - - ipLineRe := regexp.MustCompile(`^lease (.+?) {$`) - endTimeLineRe := regexp.MustCompile(`^\s*ends \d (.+?);$`) - macLineRe := regexp.MustCompile(`^\s*hardware ethernet (.+?);$`) - - for _, line := range strings.Split(string(dhcpBytes), "\n") { - // Need to trim off CR character when running in windows - line = strings.TrimRight(line, "\r") - - matches := ipLineRe.FindStringSubmatch(line) - if matches != nil { - lastIp = matches[1] - continue - } - - matches = endTimeLineRe.FindStringSubmatch(line) - if matches != nil { - lastLeaseEnd, _ = time.Parse("2006/01/02 15:04:05", matches[1]) - continue - } - - // If the mac address matches and this lease ends farther in the - // future than the last match we might have, then choose it. 
- matches = macLineRe.FindStringSubmatch(line) - if matches != nil && strings.EqualFold(matches[1], f.MACAddress) && curLeaseEnd.Before(lastLeaseEnd) { - curIp = lastIp - curLeaseEnd = lastLeaseEnd - } - } - - if curIp == "" { - return "", fmt.Errorf("IP not found for MAC %s in DHCP leases at %s", f.MACAddress, dhcpLeasesPath) - } - - return curIp, nil -} diff --git a/builder/vmware/common/guest_ip_test.go b/builder/vmware/common/guest_ip_test.go deleted file mode 100644 index fdd6b4c9c..000000000 --- a/builder/vmware/common/guest_ip_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package common - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestDHCPLeaseGuestLookup_impl(t *testing.T) { - var _ GuestIPFinder = new(DHCPLeaseGuestLookup) -} - -func TestDHCPLeaseGuestLookup(t *testing.T) { - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - if _, err := tf.Write([]byte(testLeaseContents)); err != nil { - t.Fatalf("err: %s", err) - } - tf.Close() - defer os.Remove(tf.Name()) - - driver := new(DriverMock) - driver.DhcpLeasesPathResult = tf.Name() - - finder := &DHCPLeaseGuestLookup{ - Driver: driver, - Device: "vmnet8", - MACAddress: "00:0c:29:59:91:02", - } - - ip, err := finder.GuestIP() - if err != nil { - t.Fatalf("err: %s", err) - } - - if !driver.DhcpLeasesPathCalled { - t.Fatal("should ask for DHCP leases path") - } - if driver.DhcpLeasesPathDevice != "vmnet8" { - t.Fatal("should be vmnet8") - } - - if ip != "192.168.126.130" { - t.Fatalf("bad: %#v", ip) - } -} - -const testLeaseContents = ` -# All times in this file are in UTC (GMT), not your local timezone. This is -# not a bug, so please don't ask about it. There is no portable way to -# store leases in the local timezone, so please don't request this as a -# feature. If this is inconvenient or confusing to you, we sincerely -# apologize. Seriously, though - don't ask. -# The format of this file is documented in the dhcpd.leases(5) manual page. 
- -lease 192.168.126.129 { - starts 0 2013/09/15 23:58:51; - ends 1 2013/09/16 00:28:51; - hardware ethernet 00:0c:29:59:91:02; - client-hostname "precise64"; -} -lease 192.168.126.130 { - starts 2 2013/09/17 21:39:07; - ends 2 2013/09/17 22:09:07; - hardware ethernet 00:0c:29:59:91:02; - client-hostname "precise64"; -} -lease 192.168.126.128 { - starts 0 2013/09/15 20:09:59; - ends 0 2013/09/15 20:21:58; - hardware ethernet 00:0c:29:59:91:02; - client-hostname "precise64"; -} -lease 192.168.126.127 { - starts 0 2013/09/15 20:09:59; - ends 0 2013/09/15 20:21:58; - hardware ethernet 01:0c:29:59:91:02; - client-hostname "precise64"; - -` diff --git a/builder/vmware/common/host_ip.go b/builder/vmware/common/host_ip.go deleted file mode 100644 index f920043e7..000000000 --- a/builder/vmware/common/host_ip.go +++ /dev/null @@ -1,7 +0,0 @@ -package common - -// Interface to help find the host IP that is available from within -// the VMware virtual machines. -type HostIPFinder interface { - HostIP() (string, error) -} diff --git a/builder/vmware/common/host_ip_ifconfig_test.go b/builder/vmware/common/host_ip_ifconfig_test.go deleted file mode 100644 index a69104e69..000000000 --- a/builder/vmware/common/host_ip_ifconfig_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package common - -import "testing" - -func TestIfconfigIPFinder_Impl(t *testing.T) { - var raw interface{} - raw = &IfconfigIPFinder{} - if _, ok := raw.(HostIPFinder); !ok { - t.Fatalf("IfconfigIPFinder is not a host IP finder") - } -} diff --git a/builder/vmware/common/host_ip_vmnetnatconf.go b/builder/vmware/common/host_ip_vmnetnatconf.go deleted file mode 100644 index e07227a5e..000000000 --- a/builder/vmware/common/host_ip_vmnetnatconf.go +++ /dev/null @@ -1,65 +0,0 @@ -package common - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "regexp" - "strings" -) - -// VMnetNatConfIPFinder finds the IP address of the host machine by -// retrieving the IP from the vmnetnat.conf. 
This isn't a full proof -// technique but so far it has not failed. -type VMnetNatConfIPFinder struct{} - -func (*VMnetNatConfIPFinder) HostIP() (string, error) { - driver := &Workstation9Driver{} - - vmnetnat := driver.VmnetnatConfPath() - if vmnetnat == "" { - return "", errors.New("Could not find NAT vmnet conf file") - } - - if _, err := os.Stat(vmnetnat); err != nil { - return "", fmt.Errorf("Could not find NAT vmnet conf file: %s", vmnetnat) - } - - f, err := os.Open(vmnetnat) - if err != nil { - return "", err - } - defer f.Close() - - ipRe := regexp.MustCompile(`^\s*ip\s*=\s*(.+?)\s*$`) - - r := bufio.NewReader(f) - for { - line, err := r.ReadString('\n') - if line != "" { - matches := ipRe.FindStringSubmatch(line) - if matches != nil { - ip := matches[1] - dotIndex := strings.LastIndex(ip, ".") - if dotIndex == -1 { - continue - } - - ip = ip[0:dotIndex] + ".1" - return ip, nil - } - } - - if err == io.EOF { - break - } - - if err != nil { - return "", err - } - } - - return "", errors.New("host IP not found in " + vmnetnat) -} diff --git a/builder/vmware/common/host_ip_vmnetnatconf_test.go b/builder/vmware/common/host_ip_vmnetnatconf_test.go deleted file mode 100644 index 9f3114a92..000000000 --- a/builder/vmware/common/host_ip_vmnetnatconf_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package common - -import "testing" - -func TestVMnetNatConfIPFinder_Impl(t *testing.T) { - var raw interface{} - raw = &VMnetNatConfIPFinder{} - if _, ok := raw.(HostIPFinder); !ok { - t.Fatalf("VMnetNatConfIPFinder is not a host IP finder") - } -} diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 4e339edb9..e03f9b6bf 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -14,34 +14,12 @@ import ( func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { driver := state.Get("driver").(Driver) - vmxPath := state.Get("vmx_path").(string) if 
config.Comm.SSHHost != "" { return config.Comm.SSHHost, nil } - log.Println("Lookup up IP information...") - - vmxData, err := ReadVMX(vmxPath) - if err != nil { - return "", err - } - - var ok bool - macAddress := "" - if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { - if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { - return "", errors.New("couldn't find MAC address in VMX") - } - } - - ipLookup := &DHCPLeaseGuestLookup{ - Driver: driver, - Device: "vmnet8", - MACAddress: macAddress, - } - - ipAddress, err := ipLookup.GuestIP() + ipAddress, err := driver.GuestIP(state) if err != nil { log.Printf("IP lookup failed: %s", err) return "", fmt.Errorf("IP lookup failed: %s", err) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index 35e30c624..c169c59eb 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -64,7 +64,7 @@ func (s *StepTypeBootCommand) Run(_ context.Context, state multistep.StateBag) m } // Connect to VNC - ui.Say("Connecting to VM via VNC") + ui.Say(fmt.Sprintf("Connecting to VM via VNC (%s:%d)", vncIp, vncPort)) nc, err := net.Dial("tcp", fmt.Sprintf("%s:%d", vncIp, vncPort)) if err != nil { err := fmt.Errorf("Error connecting to VNC: %s", err) @@ -94,16 +94,7 @@ func (s *StepTypeBootCommand) Run(_ context.Context, state multistep.StateBag) m log.Printf("Connected to VNC desktop: %s", c.DesktopName) // Determine the host IP - var ipFinder HostIPFinder - if finder, ok := driver.(HostIPFinder); ok { - ipFinder = finder - } else if runtime.GOOS == "windows" { - ipFinder = new(VMnetNatConfIPFinder) - } else { - ipFinder = &IfconfigIPFinder{Device: "vmnet8"} - } - - hostIP, err := ipFinder.HostIP() + hostIP, err := driver.HostIP(state) if err != nil { err := fmt.Errorf("Error detecting host IP: %s", err) state.Put("error", err) diff --git a/builder/vmware/iso/builder.go 
b/builder/vmware/iso/builder.go index 1d2a29ac5..2cce8fd3c 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -38,12 +38,31 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` + // disk drives AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` DiskName string `mapstructure:"vmdk_name"` DiskSize uint `mapstructure:"disk_size"` DiskTypeId string `mapstructure:"disk_type_id"` Format string `mapstructure:"format"` + + // platform information GuestOSType string `mapstructure:"guest_os_type"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` + + // Network type + Network string `mapstructure:"network"` + + // device presence + Sound bool `mapstructure:"sound"` + USB bool `mapstructure:"usb"` + + // communication ports + Serial string `mapstructure:"serial"` + Parallel string `mapstructure:"parallel"` + + // booting a guest + BootCommand []string `mapstructure:"boot_command"` KeepRegistered bool `mapstructure:"keep_registered"` OVFToolOptions []string `mapstructure:"ovftool_options"` SkipCompaction bool `mapstructure:"skip_compaction"` @@ -53,6 +72,7 @@ type Config struct { VMXTemplatePath string `mapstructure:"vmx_template_path"` Version string `mapstructure:"version"` + // remote vsphere RemoteType string `mapstructure:"remote_type"` RemoteDatastore string `mapstructure:"remote_datastore"` RemoteCacheDatastore string `mapstructure:"remote_cache_datastore"` @@ -158,6 +178,18 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } + if b.config.Network == "" { + b.config.Network = "nat" + } + + if !b.config.Sound { + b.config.Sound = false + } + + if !b.config.USB { + b.config.USB = false + } + // Remote configuration validation if b.config.RemoteType != "" { if b.config.RemoteHost == "" { diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index f30451c65..6df8271f6 100644 --- 
a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -18,12 +18,16 @@ import ( "github.com/hashicorp/packer/communicator/ssh" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + vmwcommon "github.com/mitchellh/packer/builder/vmware/common" gossh "golang.org/x/crypto/ssh" ) // ESX5 driver talks to an ESXi5 hypervisor remotely over SSH to build // virtual machines. This driver can only manage one machine at a time. type ESX5Driver struct { + vmwcommon.VmwareDriver + Host string Port uint Username string @@ -143,10 +147,6 @@ func (d *ESX5Driver) ToolsInstall() error { return d.sh("vim-cmd", "vmsvc/tools.install", d.vmId) } -func (d *ESX5Driver) DhcpLeasesPath(string) string { - return "" -} - func (d *ESX5Driver) Verify() error { checks := []func() error{ d.connect, @@ -159,11 +159,10 @@ func (d *ESX5Driver) Verify() error { return err } } - return nil } -func (d *ESX5Driver) HostIP() (string, error) { +func (d *ESX5Driver) HostIP(multistep.StateBag) (string, error) { conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", d.Host, d.Port)) if err != nil { return "", err diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 029f2a31e..8e9f6cda0 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" vmwcommon "github.com/hashicorp/packer/builder/vmware/common" "github.com/hashicorp/packer/helper/multistep" @@ -19,6 +20,21 @@ type vmxTemplateData struct { DiskName string ISOPath string Version string + + Network string + Sound_Present string + Usb_Present string + + Serial_Present string + Serial_Type string + Serial_Endpoint string + Serial_Host string + Serial_Yield string + Serial_Filename string + + Parallel_Present string + Parallel_Bidirectional string + Parallel_Filename string } type additionalDiskTemplateData struct { @@ 
-39,6 +55,121 @@ type stepCreateVMX struct { tempDir string } +/* serial conversions */ +type serialConfigPipe struct { + filename string + endpoint string + host string + yield string +} + +type serialConfigFile struct { + filename string +} + +type serialConfigDevice struct { + devicename string +} + +type serialUnion struct { + serialType interface{} + pipe *serialConfigPipe + file *serialConfigFile + device *serialConfigDevice +} + +func unformat_serial(config string) (*serialUnion,error) { + comptype := strings.SplitN(config, ":", 2) + if len(comptype) < 1 { + return nil,fmt.Errorf("Unexpected format for serial port: %s", config) + } + switch strings.ToUpper(comptype[0]) { + case "PIPE": + comp := strings.Split(comptype[1], ",") + if len(comp) < 3 || len(comp) > 4 { + return nil,fmt.Errorf("Unexpected format for serial port : pipe : %s", config) + } + if res := strings.ToLower(comp[1]); res != "client" || res != "server" { + return nil,fmt.Errorf("Unexpected format for serial port : pipe : endpoint : %s : %s", res, config) + } + if res := strings.ToLower(comp[2]); res != "app" || res != "vm" { + return nil,fmt.Errorf("Unexpected format for serial port : pipe : host : %s : %s", res, config) + } + res := &serialConfigPipe{ + filename : comp[0], + endpoint : comp[1], + host : map[string]string{"app":"TRUE","vm":"FALSE"}[strings.ToLower(comp[2])], + yield : "FALSE", + } + if len(comp) == 4 { + res.yield = strings.ToUpper(comp[3]) + } + if res.yield != "TRUE" || res.yield != "FALSE" { + return nil,fmt.Errorf("Unexpected format for serial port : pipe : yield : %s : %s", res.yield, config) + } + return &serialUnion{serialType:res, pipe:res},nil + + case "FILE": + res := &serialConfigFile{ filename : comptype[1] } + return &serialUnion{serialType:res, file:res},nil + + case "DEVICE": + res := new(serialConfigDevice) + res.devicename = map[bool]string{true:strings.ToUpper(comptype[1]), false:"COM1"}[len(comptype[1]) > 0] + return &serialUnion{serialType:res, 
device:res},nil + + default: + return nil,fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(comptype[0]), config) + } +} + +/* parallel port */ +type parallelUnion struct { + parallelType interface{} + file *parallelPortFile + device *parallelPortDevice +} +type parallelPortFile struct { + filename string +} +type parallelPortDevice struct { + bidirectional string + devicename string +} + +func unformat_parallel(config string) (*parallelUnion,error) { + comptype := strings.SplitN(config, ":", 2) + if len(comptype) < 1 { + return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) + } + switch strings.ToUpper(comptype[0]) { + case "FILE": + res := ¶llelPortFile{ filename: comptype[1] } + return ¶llelUnion{ parallelType:res, file: res},nil + case "DEVICE": + comp := strings.Split(comptype[1], ",") + if len(comp) < 1 || len(comp) > 2 { + return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) + } + res := new(parallelPortDevice) + res.bidirectional = "FALSE" + res.devicename = strings.ToUpper(comp[0]) + if len(comp) > 1 { + switch strings.ToUpper(comp[1]) { + case "BI": + res.bidirectional = "TRUE" + case "UNI": + res.bidirectional = "FALSE" + default: + return nil,fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(comp[0]), config) + } + } + return ¶llelUnion{ parallelType:res, device: res},nil + } + return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) +} + +/* regular steps */ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { config := state.Get("config").(*Config) isoPath := state.Get("iso_path").(string) @@ -111,14 +242,90 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist } } - ctx.Data = &vmxTemplateData{ + templateData := vmxTemplateData{ Name: config.VMName, GuestOS: config.GuestOSType, DiskName: config.DiskName, Version: config.Version, ISOPath: isoPath, + + Network: config.Network, + Sound_Present: 
map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.Sound)], + Usb_Present: map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.USB)], + + Serial_Present: "FALSE", + Parallel_Present: "FALSE", } + // store the network so that we can later figure out what ip address to bind to + state.Put("vmnetwork", config.Network) + + // check if serial port has been configured + if config.Serial != "" { + serial,err := unformat_serial(config.Serial) + if err != nil { + err := fmt.Errorf("Error procesing VMX template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + templateData.Serial_Present = "TRUE" + templateData.Serial_Filename = "" + templateData.Serial_Yield = "" + templateData.Serial_Endpoint = "" + templateData.Serial_Host = "" + + switch serial.serialType.(type) { + case *serialConfigPipe: + templateData.Serial_Type = "pipe" + templateData.Serial_Endpoint = serial.pipe.endpoint + templateData.Serial_Host = serial.pipe.host + templateData.Serial_Yield = serial.pipe.yield + templateData.Serial_Filename = filepath.FromSlash(serial.pipe.filename) + case *serialConfigFile: + templateData.Serial_Type = "file" + templateData.Serial_Filename = filepath.FromSlash(serial.file.filename) + case *serialConfigDevice: + templateData.Serial_Type = "device" + templateData.Serial_Filename = filepath.FromSlash(serial.device.devicename) + default: + err := fmt.Errorf("Error procesing VMX template: %v", serial) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + + // check if parallel port has been configured + if config.Parallel != "" { + parallel,err := unformat_parallel(config.Parallel) + if err != nil { + err := fmt.Errorf("Error procesing VMX template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + switch parallel.parallelType.(type) { + case *parallelPortFile: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Filename = 
filepath.FromSlash(parallel.file.filename) + case *parallelPortDevice: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Bidirectional = parallel.device.bidirectional + templateData.Parallel_Filename = filepath.FromSlash(parallel.device.devicename) + default: + err := fmt.Errorf("Error procesing VMX template: %v", parallel) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + + ctx.Data = &templateData + + // render the .vmx template vmxContents, err := interpolate.Render(vmxTemplate, &ctx) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) @@ -176,7 +383,7 @@ ehci.pciSlotNumber = "34" ehci.present = "TRUE" ethernet0.addressType = "generated" ethernet0.bsdName = "en0" -ethernet0.connectionType = "nat" +ethernet0.connectionType = "{{ .Network }}" ethernet0.displayName = "Ethernet" ethernet0.linkStatePropagation.enable = "FALSE" ethernet0.pciSlotNumber = "33" @@ -227,11 +434,38 @@ scsi0.virtualDev = "lsilogic" scsi0:0.fileName = "{{ .DiskName }}.vmdk" scsi0:0.present = "TRUE" scsi0:0.redo = "" -sound.startConnected = "FALSE" + +// Sound +sound.startConnected = "{{ .Sound_Present }}" +sound.present = "{{ .Sound_Present }}" +sound.fileName = "-1" +sound.autodetect = "TRUE" + tools.syncTime = "TRUE" tools.upgrade.policy = "upgradeAtPowerCycle" + +// USB usb.pciSlotNumber = "32" -usb.present = "FALSE" +usb.present = "{{ .Usb_Present }}" +usb_xhci.present = "TRUE" + +// Serial +serial0.present = "{{ .Serial_Present }}" +serial0.startConnected = "{{ .Serial_Present }}" +serial0.fileName = "{{ .Serial_Filename }}" +serial0.autodetect = "TRUE" +serial0.fileType = "{{ .Serial_Type }}" +serial0.yieldOnMsrRead = "{{ .Serial_Yield }}" +serial0.pipe.endPoint = "{{ .Serial_Endpoint }}" +serial0.tryNoRxLoss = "{{ .Serial_Host }}" + +// Parallel +parallel0.present = "{{ .Parallel_Present }}" +parallel0.startConnected = "{{ .Parallel_Present }}" +parallel0.fileName = "{{ .Parallel_Filename }}" 
+parallel0.autodetect = "TRUE" +parallel0.bidirectional = "{{ .Parallel_Bidirectional }}" + virtualHW.productCompatibility = "hosted" virtualHW.version = "{{ .Version }}" vmci0.id = "1861462627" diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go index f7874067c..bd644ca30 100644 --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -54,6 +54,16 @@ func (s *StepCloneVMX) Run(_ context.Context, state multistep.StateBag) multiste return multistep.ActionHalt } + var networkType string + if _, ok := vmxData["ethernet0.connectionType"]; ok { + networkType = vmxData["ethernet0.connectionType"] + } + if networkType == "" { + networkType = "nat" + log.Printf("Defaulting to network type : nat") + } + + state.Put("vmnetwork", networkType) state.Put("full_disk_path", filepath.Join(s.OutputDir, diskName)) state.Put("vmx_path", vmxPath) return multistep.ActionContinue From 4225b3568ee87376272dda4fdcf3325eea49cc9e Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 18:45:36 -0500 Subject: [PATCH 157/251] Fixed bad ORs and a bad fmtstring. 
--- builder/vmware/common/driver_parser.go | 2 +- builder/vmware/iso/step_create_vmx.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/vmware/common/driver_parser.go b/builder/vmware/common/driver_parser.go index 38470eb5b..7071e424d 100644 --- a/builder/vmware/common/driver_parser.go +++ b/builder/vmware/common/driver_parser.go @@ -362,7 +362,7 @@ type pParameterBoolean struct { parameter string truancy bool } -func (e pParameterBoolean) repr() string { return fmt.Sprintf("boolean:%s=%s",e.parameter,e.truancy) } +func (e pParameterBoolean) repr() string { return fmt.Sprintf("boolean:%s=%v",e.parameter,e.truancy) } type pParameterClientMatch struct { name string diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 8e9f6cda0..c1f4ea8f3 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -89,10 +89,10 @@ func unformat_serial(config string) (*serialUnion,error) { if len(comp) < 3 || len(comp) > 4 { return nil,fmt.Errorf("Unexpected format for serial port : pipe : %s", config) } - if res := strings.ToLower(comp[1]); res != "client" || res != "server" { + if res := strings.ToLower(comp[1]); res != "client" && res != "server" { return nil,fmt.Errorf("Unexpected format for serial port : pipe : endpoint : %s : %s", res, config) } - if res := strings.ToLower(comp[2]); res != "app" || res != "vm" { + if res := strings.ToLower(comp[2]); res != "app" && res != "vm" { return nil,fmt.Errorf("Unexpected format for serial port : pipe : host : %s : %s", res, config) } res := &serialConfigPipe{ @@ -104,7 +104,7 @@ func unformat_serial(config string) (*serialUnion,error) { if len(comp) == 4 { res.yield = strings.ToUpper(comp[3]) } - if res.yield != "TRUE" || res.yield != "FALSE" { + if res.yield != "TRUE" && res.yield != "FALSE" { return nil,fmt.Errorf("Unexpected format for serial port : pipe : yield : %s : %s", res.yield, config) } return 
&serialUnion{serialType:res, pipe:res},nil From 9b95ce0bc6ff4da69fdf91031e2e463fb049c455 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 20:45:58 -0500 Subject: [PATCH 158/251] Completely forgot to support the unix paths in each of the drivers for the VMware builder. Fixed. --- builder/vmware/common/driver.go | 33 ++++++------ builder/vmware/common/driver_mock.go | 24 ++++++--- builder/vmware/common/driver_player5.go | 12 +++-- .../vmware/common/driver_player5_windows.go | 13 ++--- builder/vmware/common/driver_player_unix.go | 46 ++++++++++++++--- builder/vmware/common/driver_workstation9.go | 9 ++-- .../common/driver_workstation9_windows.go | 12 +++-- .../vmware/common/driver_workstation_unix.go | 51 ++++++++++++++----- 8 files changed, 136 insertions(+), 64 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 1098e591f..fa2cc0c03 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -235,8 +235,8 @@ type VmwareDriver struct { /// files so that the address detection (ip and ethernet) machinery /// works. 
DhcpLeasesPath func(string) string - VmnetnatConfPath func() string - DhcpConfPath func() string + DhcpConfPath func(string) string + VmnetnatConfPath func(string) string NetmapConfPath func() string } @@ -359,19 +359,20 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { netmap,err := readNetmapConfig(pathNetmap) if err != nil { return "",err } - // parse dhcpd configuration - pathDhcpConfig := d.DhcpConfPath() - if _, err := os.Stat(pathDhcpConfig); err != nil { - return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) - } - config,err := readDhcpConfig(pathDhcpConfig) - if err != nil { return "",err } - // convert network to name network := state.Get("vmnetwork").(string) device,err := netmap.NameIntoDevice(network) if err != nil { return "", err } + // parse dhcpd configuration + pathDhcpConfig := d.DhcpConfPath(device) + if _, err := os.Stat(pathDhcpConfig); err != nil { + return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) + } + + config,err := readDhcpConfig(pathDhcpConfig) + if err != nil { return "",err } + // find the entry configured in the dhcpd interfaceConfig,err := config.HostByName(device) if err != nil { return "", err } @@ -404,19 +405,19 @@ func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { netmap,err := readNetmapConfig(pathNetmap) if err != nil { return "",err } + // convert network to name + network := state.Get("vmnetwork").(string) + device,err := netmap.NameIntoDevice(network) + if err != nil { return "", err } + // parse dhcpd configuration - pathDhcpConfig := d.DhcpConfPath() + pathDhcpConfig := d.DhcpConfPath(device) if _, err := os.Stat(pathDhcpConfig); err != nil { return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) } config,err := readDhcpConfig(pathDhcpConfig) if err != nil { return "",err } - // convert network to name - network := state.Get("vmnetwork").(string) - device,err := 
netmap.NameIntoDevice(network) - if err != nil { return "", err } - // find the entry configured in the dhcpd interfaceConfig,err := config.HostByName(device) if err != nil { return "", err } diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 15e006403..da28cd474 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -78,12 +78,15 @@ type DriverMock struct { DhcpLeasesPathDevice string DhcpLeasesPathResult string - NetmapPathCalled bool - NetmapPathResult string - DhcpConfPathCalled bool DhcpConfPathResult string + VmnetnatConfPathCalled bool + VmnetnatConfPathResult string + + NetmapConfPathCalled bool + NetmapConfPathResult string + VerifyCalled bool VerifyErr error } @@ -184,16 +187,21 @@ func (d *DriverMock) DhcpLeasesPath(device string) string { return d.DhcpLeasesPathResult } -func (d *DriverMock) NetmapPath(device string) string { - d.NetmapPathCalled = true - return d.NetmapPathResult -} - func (d *DriverMock) DhcpConfPath(device string) string { d.DhcpConfPathCalled = true return d.DhcpConfPathResult } +func (d *DriverMock) VmnetnatConfPath(device string) string { + d.VmnetnatConfPathCalled = true + return d.VmnetnatConfPathResult +} + +func (d *DriverMock) NetmapConfPath() string { + d.NetmapConfPathCalled = true + return d.NetmapConfPathResult +} + func (d *DriverMock) Verify() error { d.VerifyCalled = true return d.VerifyErr diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 9dce9540c..240b44a11 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -187,16 +187,18 @@ func (d *Player5Driver) Verify() error { d.VmwareDriver.DhcpLeasesPath = func(device string) string { return playerDhcpLeasesPath(device) } - d.VmwareDriver.VmnetnatConfPath = func() string { - return playerVmnetnatConfPath() + + d.VmwareDriver.DhcpConfPath = func(device string) string { + return 
playerVmDhcpConfPath(device) } - d.VmwareDriver.DhcpConfPath = func() string { - return playerVmDhcpConfPath() + + d.VmwareDriver.VmnetnatConfPath = func(device string) string { + return playerVmnetnatConfPath(device) } + d.VmwareDriver.NetmapConfPath = func() string { return playerNetmapConfPath() } - return nil } diff --git a/builder/vmware/common/driver_player5_windows.go b/builder/vmware/common/driver_player5_windows.go index 6ca065792..790a4c46c 100644 --- a/builder/vmware/common/driver_player5_windows.go +++ b/builder/vmware/common/driver_player5_windows.go @@ -57,15 +57,11 @@ func playerDhcpLeasesPath(device string) string { } else if _, err := os.Stat(path); err == nil { return path } - return findFile("vmnetdhcp.leases", playerDataFilePaths()) } -func playerVmnetnatConfPath() string { - return findFile("vmnetnat.conf", playerDataFilePaths()) -} - -func playerVmDhcpConfPath() string { +func playerVmDhcpConfPath(device string) string { + // the device isn't actually used on windows hosts path, err := playerDhcpConfigPathRegistry() if err != nil { log.Printf("Error finding configuration in registry: %s", err) @@ -75,6 +71,11 @@ func playerVmDhcpConfPath() string { return findFile("vmnetdhcp.conf", playerDataFilePaths()) } +func playerVmnetnatConfPath(device string) string { + // the device isn't actually used on windows hosts + return findFile("vmnetnat.conf", playerDataFilePaths()) +} + func playerNetmapConfPath() string { return findFile("netmap.conf", playerDataFilePaths()) } diff --git a/builder/vmware/common/driver_player_unix.go b/builder/vmware/common/driver_player_unix.go index 0c2cc8635..01b877d92 100644 --- a/builder/vmware/common/driver_player_unix.go +++ b/builder/vmware/common/driver_player_unix.go @@ -10,6 +10,7 @@ import ( "os/exec" "regexp" "runtime" + "path/filepath" ) func playerFindVdiskManager() (string, error) { @@ -28,16 +29,49 @@ func playerFindVmrun() (string, error) { return exec.LookPath("vmrun") } -func playerDhcpLeasesPath(device 
string) string { - return "/etc/vmware/" + device + "/dhcpd/dhcpd.leases" -} - func playerToolsIsoPath(flavor string) string { return "/usr/lib/vmware/isoimages/" + flavor + ".iso" } -func playerVmnetnatConfPath() string { - return "" +// return the base path to vmware's config on the host +func playerVMwareRoot() (s string, err error) { + return "/etc/vmware", nil +} + +func playerDhcpLeasesPath(device string) string { + base, err := playerVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "dhcpd/dhcpd.leases") +} + +func playerVmDhcpConfPath(device string) string { + base, err := playerVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "dhcp/dhcp.conf") +} + +func playerVmnetnatConfPath(device string) string { + base, err := playerVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "nat/nat.conf") +} + +func playerNetmapConfPath() string { + base, err := playerVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, "netmap.conf") } func playerVerifyVersion(version string) error { diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index 0c463a249..8a6eb5556 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -149,18 +149,17 @@ func (d *Workstation9Driver) Verify() error { return workstationDhcpLeasesPath(device) } - d.VmwareDriver.VmnetnatConfPath = func() string { - return workstationVmnetnatConfPath() + d.VmwareDriver.DhcpConfPath = func(device string) string { + return workstationDhcpConfPath(device) } - d.VmwareDriver.DhcpConfPath = func() string { - return workstationDhcpConfPath() + d.VmwareDriver.VmnetnatConfPath = func(device 
string) string { + return workstationVmnetnatConfPath(device) } d.VmwareDriver.NetmapConfPath = func() string { return workstationNetmapConfPath() } - return nil } diff --git a/builder/vmware/common/driver_workstation9_windows.go b/builder/vmware/common/driver_workstation9_windows.go index e69d5069f..b8d6d2b61 100644 --- a/builder/vmware/common/driver_workstation9_windows.go +++ b/builder/vmware/common/driver_workstation9_windows.go @@ -59,7 +59,13 @@ func workstationDhcpLeasesPath(device string) string { return findFile("vmnetdhcp.leases", workstationDataFilePaths()) } -func workstationVmnetnatConfPath() string { +func workstationDhcpConfPath(device string) string { + // device isn't used on a windows host + return findFile("vmnetdhcp.conf", workstationDataFilePaths()) +} + +func workstationVmnetnatConfPath(device string) string { + // device isn't used on a windows host return findFile("vmnetnat.conf", workstationDataFilePaths()) } @@ -67,10 +73,6 @@ func workstationNetmapConfPath() string { return findFile("netmap.conf", workstationDataFilePaths()) } -func workstationDhcpConfPath() string { - return findFile("vmnetdhcp.conf", workstationDataFilePaths()) -} - // See http://blog.natefinch.com/2012/11/go-win-stuff.html // // This is used by workstationVMwareRoot in order to read some registry data. 
diff --git a/builder/vmware/common/driver_workstation_unix.go b/builder/vmware/common/driver_workstation_unix.go index 2ff94fd37..45e94d21e 100644 --- a/builder/vmware/common/driver_workstation_unix.go +++ b/builder/vmware/common/driver_workstation_unix.go @@ -39,26 +39,51 @@ func workstationFindVmrun() (string, error) { return exec.LookPath("vmrun") } +// return the base path to vmware's config on the host +func workstationVMwareRoot() (s string, err error) { + return "/etc/vmware", nil +} + func workstationDhcpLeasesPath(device string) string { - return "/etc/vmware/" + device + "/dhcpd/dhcpd.leases" + base, err := workstationVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "dhcpd/dhcpd.leases") +} + +func workstationDhcpConfPath(device string) string { + base, err := workstationVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "dhcp/dhcpd.conf") +} + +func workstationVmnetnatConfPath(device string) string { + base, err := workstationVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, device, "nat/nat.conf") +} + +func workstationNetmapConfPath() string { + base, err := workstationVMwareRoot() + if err != nil { + log.Printf("Error finding VMware root: %s", err) + return "" + } + return filepath.Join(base, "netmap.conf") } func workstationToolsIsoPath(flavor string) string { return "/usr/lib/vmware/isoimages/" + flavor + ".iso" } -func workstationVmnetnatConfPath() string { - return "" -} - -func workstationNetmapConfPath(device string) string { - return "" // FIXME -} - -func workstationDhcpConfPath(device string) string { - return "/etc/vmware/" + device + "/dhcpd/dhcpd.conf" -} - func workstationVerifyVersion(version string) error { if runtime.GOOS != "linux" { return fmt.Errorf("The VMware WS version %s driver is only 
supported on Linux, and Windows, at the moment. Your OS: %s", version, runtime.GOOS) From 15cb6a833a32a1f44c00625c00fe36ac52084a7d Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 5 Apr 2016 20:56:47 -0500 Subject: [PATCH 159/251] Ugh..missing argument in VMware builder's driver_esx5 unit-test due to api change for .HostIP(). Fixed. --- builder/vmware/iso/driver_esx5_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/vmware/iso/driver_esx5_test.go b/builder/vmware/iso/driver_esx5_test.go index 0c0ef757e..097d6e186 100644 --- a/builder/vmware/iso/driver_esx5_test.go +++ b/builder/vmware/iso/driver_esx5_test.go @@ -50,8 +50,9 @@ func TestESX5Driver_HostIP(t *testing.T) { defer listen.Close() driver := ESX5Driver{Host: "localhost", Port: uint(port)} + state := new(multistep.BasicStateBag) - if host, _ := driver.HostIP(); host != expected_host { + if host, _ := driver.HostIP(state); host != expected_host { t.Error(fmt.Sprintf("Expected string, %s but got %s", expected_host, host)) } } From e389d30a1b15de3042f35faa70b7fc4cad7b5c06 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sun, 24 Apr 2016 22:22:13 -0500 Subject: [PATCH 160/251] Implemented the unit-tests for builder/vmware/iso/step_create_vmx.go --- builder/vmware/iso/step_create_vmx_test.go | 327 +++++++++++++++++++++ 1 file changed, 327 insertions(+) create mode 100644 builder/vmware/iso/step_create_vmx_test.go diff --git a/builder/vmware/iso/step_create_vmx_test.go b/builder/vmware/iso/step_create_vmx_test.go new file mode 100644 index 000000000..e540eee8a --- /dev/null +++ b/builder/vmware/iso/step_create_vmx_test.go @@ -0,0 +1,327 @@ +package iso + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "math" + "math/rand" + "strconv" + "io/ioutil" + "bytes" + "runtime" + + "testing" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" + "github.com/mitchellh/packer/provisioner/shell" +) + +var vmxTestBuilderConfig = 
map[string]string{ + "type": `"vmware-iso"`, + "iso_url": `"https://archive.org/download/ut-ttylinux-i686-12.6/ut-ttylinux-i686-12.6.iso"`, + "iso_checksum_type": `"md5"`, + "iso_checksum": `"43c1feeae55a44c6ef694b8eb18408a6"`, + "ssh_username": `"root"`, + "ssh_password": `"password"`, + "ssh_wait_timeout": `"45s"`, + "boot_command": `["","rootpassword","udhcpc"]`, + "shutdown_command": `"/sbin/shutdown -h; exit 0"`, +} + +var vmxTestProvisionerConfig = map[string]string{ + "type": `"shell"`, + "inline": `["echo hola mundo"]`, +} + +const vmxTestTemplate string = `{"builders":[{%s}],"provisioners":[{%s}]}` + +func tmpnam(prefix string) string { + var path string + var err error + + const length = 16 + + dir := os.TempDir() + max := int(math.Pow(2, float64(length))) + + n,err := rand.Intn(max),nil + for path = filepath.Join(dir, prefix + strconv.Itoa(n)); err == nil; _,err = os.Stat(path) { + n = rand.Intn(max) + path = filepath.Join(dir, prefix + strconv.Itoa(n)) + } + return path +} + +func createFloppyOutput(prefix string) (string,string,error) { + output := tmpnam(prefix) + f,err := os.Create(output) + if err != nil { return "","",fmt.Errorf("Unable to create empty %s: %s", output, err) } + f.Close() + + vmxData := []string{ + `"floppy0.present":"TRUE"`, + `"floppy0.fileType":"file"`, + `"floppy0.clientDevice":"FALSE"`, + `"floppy0.fileName":"%s"`, + `"floppy0.startConnected":"TRUE"`, + } + + outputFile := strings.Replace(output, "\\", "\\\\", -1) + vmxString := fmt.Sprintf("{" + strings.Join(vmxData, ",") + "}", outputFile) + return output,vmxString,nil +} + +func readFloppyOutput(path string) (string,error) { + f,err := os.Open(path) + if err != nil { return "",fmt.Errorf("Unable to open file %s", path) } + defer f.Close() + data,err := ioutil.ReadAll(f) + if err != nil { return "",fmt.Errorf("Unable to read file: %s", err) } + if len(data) == 0 { return "", nil } + return string(data[:bytes.IndexByte(data,0)]),nil +} + +func setupVMwareBuild(t *testing.T, 
builderConfig map[string]string, provisionerConfig map[string]string) error { + ui := packer.TestUi(t) + + // create builder config and update with user-supplied options + cfgBuilder := map[string]string{} + for k,v := range vmxTestBuilderConfig { cfgBuilder[k] = v } + for k,v := range builderConfig { cfgBuilder[k] = v } + + // convert our builder config into a single sprintfable string + builderLines := []string{} + for k,v := range cfgBuilder { + builderLines = append(builderLines, fmt.Sprintf(`"%s":%s`, k, v)) + } + + // create provisioner config and update with user-supplied options + cfgProvisioner := map[string]string{} + for k,v := range vmxTestProvisionerConfig { cfgProvisioner[k] = v } + for k,v := range provisionerConfig { cfgProvisioner[k] = v } + + // convert our provisioner config into a single sprintfable string + provisionerLines := []string{} + for k,v := range cfgProvisioner { + provisionerLines = append(provisionerLines, fmt.Sprintf(`"%s":%s`, k, v)) + } + + // and now parse them into a template + configString := fmt.Sprintf(vmxTestTemplate, strings.Join(builderLines,`,`), strings.Join(provisionerLines,`,`)) + + tpl,err := template.Parse(strings.NewReader(configString)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + // create our config to test the vmware-iso builder + components := packer.ComponentFinder{ + Builder: func(n string) (packer.Builder,error) { + return &Builder{},nil + }, + Hook: func(n string) (packer.Hook,error) { + return &packer.DispatchHook{},nil + }, + PostProcessor: func(n string) (packer.PostProcessor,error) { + return &packer.MockPostProcessor{},nil + }, + Provisioner: func(n string) (packer.Provisioner,error) { + return &shell.Provisioner{},nil + }, + } + config := packer.CoreConfig{ + Template: tpl, + Components: components, + } + + // create a core using our template + core,err := packer.NewCore(&config) + if err != nil { t.Fatalf("Unable to create core: %s", err) } + + // now we can prepare 
our build + b,err := core.Build("vmware-iso") + if err != nil { t.Fatalf("Unable to create build: %s", err) } + + warn,err := b.Prepare() + if len(warn) > 0 { + for _,w := range warn { + t.Logf("Configuration warning: %s", w) + } + } + + // and then finally build it + cache := &packer.FileCache{CacheDir: os.TempDir()} + artifacts,err := b.Run(ui, cache) + if err != nil { + t.Fatalf("Failed to build artifact: %s", err) + } + + // check to see that we only got one artifact back + if len(artifacts) == 1 { + return artifacts[0].Destroy() + } + + // otherwise some number of errors happened + t.Logf("Unexpected number of artifacts returned: %d", len(artifacts)) + errors := make([]error, 0) + for _,artifact := range artifacts { + if err := artifact.Destroy(); err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + t.Errorf("%d Errors returned while trying to destroy artifacts", len(errors)) + return fmt.Errorf("Error while trying to destroy artifacts: %v", errors) + } + return nil +} + +func TestStepCreateVmx_SerialFile(t *testing.T) { + tmpfile := tmpnam("SerialFileInput.") + + serialConfig := map[string]string{ + "serial": fmt.Sprintf(`"file:%s"`, filepath.ToSlash(tmpfile)), + } + + error := setupVMwareBuild(t, serialConfig, map[string]string{}) + if error != nil { t.Errorf("Unable to read file: %s", error) } + + f,err := os.Stat(tmpfile) + if err != nil { + t.Errorf("VMware builder did not create a file for serial port: %s", err) + } + + if f != nil { + if err := os.Remove(tmpfile); err != nil { + t.Fatalf("Unable to remove file %s: %s", tmpfile, err) + } + } +} + +func TestStepCreateVmx_SerialPort(t *testing.T) { + var defaultSerial string + if runtime.GOOS == "windows" { + defaultSerial = "COM1" + } else { + defaultSerial = "/dev/ttyS0" + } + + config := map[string]string{ + "serial": fmt.Sprintf(`"device:%s"`, filepath.ToSlash(defaultSerial)), + } + provision := map[string]string{ + "inline": `"dmesg | egrep -o '^serial8250: ttyS1 at' > 
/dev/fd0"`, + } + + // where to write output + output,vmxData,err := createFloppyOutput("SerialPortOutput.") + if err != nil { t.Fatalf("Error creating output: %s", err) } + defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + config["vmx_data"] = vmxData + t.Logf("Preparing to write output to %s", output) + + // whee + err = setupVMwareBuild(t, config, provision) + if err != nil { t.Errorf("%s", err) } + + // check the output + data,err := readFloppyOutput(output) + if err != nil { t.Errorf("%s", err) } + + if data != "serial8250: ttyS1 at\n" { + t.Errorf("Serial port not detected : %v", data) + } +} + +func TestStepCreateVmx_ParallelPort(t *testing.T) { + var defaultParallel string + if runtime.GOOS == "windows" { + defaultParallel = "LPT1" + } else { + defaultParallel = "/dev/lp0" + } + + config := map[string]string{ + "parallel": fmt.Sprintf(`"device:%s,uni"`, filepath.ToSlash(defaultParallel)), + } + provision := map[string]string{ + "inline": `"cat /proc/modules | egrep -o '^parport ' > /dev/fd0"`, + } + + // where to write output + output,vmxData,err := createFloppyOutput("ParallelPortOutput.") + if err != nil { t.Fatalf("Error creating output: %s", err) } + defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + config["vmx_data"] = vmxData + t.Logf("Preparing to write output to %s", output) + + // whee + error := setupVMwareBuild(t, config, provision) + if error != nil { t.Errorf("%s", error) } + + // check the output + data,err := readFloppyOutput(output) + if err != nil { t.Errorf("%s", err) } + + if data != "parport \n" { + t.Errorf("Parallel port not detected : %v", data) + } +} + +func TestStepCreateVmx_Usb(t *testing.T) { + config := map[string]string{ + "usb": `"TRUE"`, + } + provision := map[string]string{ + "inline": `"dmesg | egrep -m1 -o 'USB hub found$' > /dev/fd0"`, + } + + // where to write output + output,vmxData,err := createFloppyOutput("UsbOutput.") + if err != nil { t.Fatalf("Error 
creating output: %s", err) } + defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + config["vmx_data"] = vmxData + t.Logf("Preparing to write output to %s", output) + + // whee + error := setupVMwareBuild(t, config, provision) + if error != nil { t.Errorf("%s", error) } + + // check the output + data,err := readFloppyOutput(output) + if err != nil { t.Errorf("%s", err) } + + if data != "USB hub found\n" { + t.Errorf("USB support not detected : %v", data) + } +} + +func TestStepCreateVmx_Sound(t *testing.T) { + config := map[string]string{ + "sound": `"TRUE"`, + } + provision := map[string]string { + "inline": `"cat /proc/modules | egrep -o '^soundcore' > /dev/fd0"`, + } + + // where to write output + output,vmxData,err := createFloppyOutput("SoundOutput.") + if err != nil { t.Fatalf("Error creating output: %s", err) } + defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + config["vmx_data"] = vmxData + t.Logf("Preparing to write output to %s", output) + + // whee + error := setupVMwareBuild(t, config, provision) + if error != nil { t.Errorf("Unable to read file: %s", error) } + + // check the output + data,err := readFloppyOutput(output) + if err != nil { t.Errorf("%s", err) } + + if data != "soundcore\n" { + t.Errorf("Soundcard not detected : %v", data) + } +} From b52e2d3f4575f15fec7e94d48217f55b091d92d6 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 31 May 2016 14:39:38 -0500 Subject: [PATCH 161/251] Added the ability for the vmware-builder to fallback and determine the network device-name using the .vmx configuration in case of a guest using the "custom" connection type. 
--- builder/vmware/common/driver.go | 83 +++++++++++++++++++++++++++------ 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index fa2cc0c03..5359bf07d 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -212,7 +212,8 @@ func compareVersions(versionFound string, versionWanted string, product string) return nil } -// helper functions that read configuration information from a file +/// helper functions that read configuration information from a file +// read the network<->device configuration out of the specified path func readNetmapConfig(path string) (NetworkMap,error) { fd,err := os.Open(path) if err != nil { return nil, err } @@ -220,6 +221,7 @@ func readNetmapConfig(path string) (NetworkMap,error) { return ReadNetworkMap(fd) } +// read the dhcp configuration out of the specified path func readDhcpConfig(path string) (DhcpConfiguration,error) { fd,err := os.Open(path) if err != nil { return nil, err } @@ -227,6 +229,36 @@ func readDhcpConfig(path string) (DhcpConfiguration,error) { return ReadDhcpConfiguration(fd) } +// read the VMX configuration from the specified path +func readVMXConfig(path string) (map[string]string,error) { + f, err := os.Open(path) + if err != nil { + return map[string]string{}, err + } + defer f.Close() + + vmxBytes, err := ioutil.ReadAll(f) + if err != nil { + return map[string]string{}, err + } + return ParseVMX(string(vmxBytes)), nil +} + +// read the connection type out of a vmx configuration +func readCustomDeviceName(vmxData map[string]string) (string,error) { + + connectionType, ok := vmxData["ethernet0.connectiontype"] + if !ok || connectionType != "custom" { + return "", fmt.Errorf("Unable to determine the device name for the connection type : %s", connectionType) + } + + device, ok := vmxData["ethernet0.vnet"] + if !ok || device == "" { + return "", fmt.Errorf("Unable to determine the device name for the connection 
type \"%s\" : %s", connectionType, device) + } + return device, nil +} + // This VmwareDriver is a base class that contains default methods // that a Driver can use or implement themselves. type VmwareDriver struct { @@ -244,17 +276,8 @@ func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string,error) { vmxPath := state.Get("vmx_path").(string) log.Println("Lookup up IP information...") - f, err := os.Open(vmxPath) - if err != nil { - return "", err - } - defer f.Close() - - vmxBytes, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - vmxData := ParseVMX(string(vmxBytes)) + vmxData, err := readVMXConfig(vmxPath) + if err != nil { return "", err } var ok bool macAddress := "" @@ -283,7 +306,17 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string,error) { // convert the stashed network to a device network := state.Get("vmnetwork").(string) device,err := netmap.NameIntoDevice(network) - if err != nil { return "", err } + + // we were unable to find the device, maybe it's a custom one... + // so, check to see if it's in the .vmx configuration + if err != nil || network == "custom" { + vmxPath := state.Get("vmx_path").(string) + vmxData, err := readVMXConfig(vmxPath) + if err != nil { return "", err } + + device, err = readCustomDeviceName(vmxData) + if err != nil { return "", err } + } // figure out our MAC address for looking up the guest address MACAddress,err := d.GuestAddress(state) @@ -362,7 +395,17 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { // convert network to name network := state.Get("vmnetwork").(string) device,err := netmap.NameIntoDevice(network) - if err != nil { return "", err } + + // we were unable to find the device, maybe it's a custom one... 
+ // so, check to see if it's in the .vmx configuration + if err != nil || network == "custom" { + vmxPath := state.Get("vmx_path").(string) + vmxData, err := readVMXConfig(vmxPath) + if err != nil { return "", err } + + device, err = readCustomDeviceName(vmxData) + if err != nil { return "", err } + } // parse dhcpd configuration pathDhcpConfig := d.DhcpConfPath(device) @@ -408,7 +451,17 @@ func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { // convert network to name network := state.Get("vmnetwork").(string) device,err := netmap.NameIntoDevice(network) - if err != nil { return "", err } + + // we were unable to find the device, maybe it's a custom one... + // so, check to see if it's in the .vmx configuration + if err != nil || network == "custom" { + vmxPath := state.Get("vmx_path").(string) + vmxData, err := readVMXConfig(vmxPath) + if err != nil { return "", err } + + device, err = readCustomDeviceName(vmxData) + if err != nil { return "", err } + } // parse dhcpd configuration pathDhcpConfig := d.DhcpConfPath(device) From 0d6cf7fac44869751d31f262b2ab8ee694ed41b9 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 5 Oct 2016 21:51:42 -0500 Subject: [PATCH 162/251] Added support for auto-detection to the serial and parallel port types. Included the yield option to all the serial port types. Added the ability for the network type to fallback to a custom network if the specified network name is not found in netmap.conf. Promoted the scope for both Read{Dhcp,Netmap}Config inside vmwcommon.driver. Updated the documentation for the VMware builder. 
--- builder/vmware/common/driver.go | 14 +- builder/vmware/iso/step_create_vmx.go | 197 +++++++++++++++--- .../source/docs/builders/vmware-iso.html.md | 56 +++++ 3 files changed, 233 insertions(+), 34 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 5359bf07d..0f0bc1fe8 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -214,7 +214,7 @@ func compareVersions(versionFound string, versionWanted string, product string) /// helper functions that read configuration information from a file // read the network<->device configuration out of the specified path -func readNetmapConfig(path string) (NetworkMap,error) { +func ReadNetmapConfig(path string) (NetworkMap,error) { fd,err := os.Open(path) if err != nil { return nil, err } defer fd.Close() @@ -222,7 +222,7 @@ func readNetmapConfig(path string) (NetworkMap,error) { } // read the dhcp configuration out of the specified path -func readDhcpConfig(path string) (DhcpConfiguration,error) { +func ReadDhcpConfig(path string) (DhcpConfiguration,error) { fd,err := os.Open(path) if err != nil { return nil, err } defer fd.Close() @@ -300,7 +300,7 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string,error) { if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := readNetmapConfig(pathNetmap) + netmap,err := ReadNetmapConfig(pathNetmap) if err != nil { return "",err } // convert the stashed network to a device @@ -389,7 +389,7 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := readNetmapConfig(pathNetmap) + netmap,err := ReadNetmapConfig(pathNetmap) if err != nil { return "",err } // convert network to name @@ -413,7 +413,7 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) 
(string,error) { return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) } - config,err := readDhcpConfig(pathDhcpConfig) + config,err := ReadDhcpConfig(pathDhcpConfig) if err != nil { return "",err } // find the entry configured in the dhcpd @@ -445,7 +445,7 @@ func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := readNetmapConfig(pathNetmap) + netmap,err := ReadNetmapConfig(pathNetmap) if err != nil { return "",err } // convert network to name @@ -468,7 +468,7 @@ func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { if _, err := os.Stat(pathDhcpConfig); err != nil { return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) } - config,err := readDhcpConfig(pathDhcpConfig) + config,err := ReadDhcpConfig(pathDhcpConfig) if err != nil { return "",err } // find the entry configured in the dhcpd diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index c1f4ea8f3..d37f00969 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -21,7 +21,9 @@ type vmxTemplateData struct { ISOPath string Version string - Network string + Network_Type string + Network_Device string + Sound_Present string Usb_Present string @@ -31,10 +33,12 @@ type vmxTemplateData struct { Serial_Host string Serial_Yield string Serial_Filename string + Serial_Auto string Parallel_Present string Parallel_Bidirectional string Parallel_Filename string + Parallel_Auto string } type additionalDiskTemplateData struct { @@ -65,10 +69,17 @@ type serialConfigPipe struct { type serialConfigFile struct { filename string + yield string } type serialConfigDevice struct { devicename string + yield string +} + +type serialConfigAuto struct { + devicename string + yield string } type serialUnion struct { @@ -76,16 +87,33 @@ 
type serialUnion struct { pipe *serialConfigPipe file *serialConfigFile device *serialConfigDevice + auto *serialConfigAuto } func unformat_serial(config string) (*serialUnion,error) { - comptype := strings.SplitN(config, ":", 2) - if len(comptype) < 1 { + var defaultSerialPort string + if runtime.GOOS == "windows" { + defaultSerialPort = "COM1" + } else { + defaultSerialPort = "/dev/ttyS0" + } + + input := strings.SplitN(config, ":", 2) + if len(input) < 1 { return nil,fmt.Errorf("Unexpected format for serial port: %s", config) } - switch strings.ToUpper(comptype[0]) { + + var formatType, formatOptions string + formatType = input[0] + if len(input) == 2 { + formatOptions = input[1] + } else { + formatOptions = "" + } + + switch strings.ToUpper(formatType) { case "PIPE": - comp := strings.Split(comptype[1], ",") + comp := strings.Split(formatOptions, ",") if len(comp) < 3 || len(comp) > 4 { return nil,fmt.Errorf("Unexpected format for serial port : pipe : %s", config) } @@ -110,16 +138,65 @@ func unformat_serial(config string) (*serialUnion,error) { return &serialUnion{serialType:res, pipe:res},nil case "FILE": - res := &serialConfigFile{ filename : comptype[1] } + comp := strings.Split(formatOptions, ",") + if len(comp) > 2 { + return nil,fmt.Errorf("Unexpected format for serial port : file : %s", config) + } + + res := &serialConfigFile{ yield : "FALSE" } + + res.filename = filepath.FromSlash(comp[0]) + + res.yield = map[bool]string{true:strings.ToUpper(comp[0]), false:"FALSE"}[len(comp) > 1] + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil,fmt.Errorf("Unexpected format for serial port : file : yield : %s : %s", res.yield, config) + } + return &serialUnion{serialType:res, file:res},nil case "DEVICE": + comp := strings.Split(formatOptions, ",") + if len(comp) > 2 { + return nil,fmt.Errorf("Unexpected format for serial port : device : %s", config) + } + res := new(serialConfigDevice) - res.devicename = 
map[bool]string{true:strings.ToUpper(comptype[1]), false:"COM1"}[len(comptype[1]) > 0] + + if len(comp) == 2 { + res.devicename = map[bool]string{true:filepath.FromSlash(comp[0]), false:defaultSerialPort}[len(comp[0]) > 0] + res.yield = strings.ToUpper(comp[1]) + } else if len(comp) == 1 { + res.devicename = map[bool]string{true:filepath.FromSlash(comp[0]), false:defaultSerialPort}[len(comp[0]) > 0] + res.yield = "FALSE" + } else if len(comp) == 0 { + res.devicename = defaultSerialPort + res.yield = "FALSE" + } + + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil,fmt.Errorf("Unexpected format for serial port : device : yield : %s : %s", res.yield, config) + } + return &serialUnion{serialType:res, device:res},nil + case "AUTO": + res := new(serialConfigAuto) + res.devicename = defaultSerialPort + + if len(formatOptions) > 0 { + res.yield = strings.ToUpper(formatOptions) + } else { + res.yield = "FALSE" + } + + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil,fmt.Errorf("Unexpected format for serial port : auto : yield : %s : %s", res.yield, config) + } + + return &serialUnion{serialType:res, auto:res},nil + default: - return nil,fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(comptype[0]), config) + return nil,fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(formatType), config) } } @@ -128,6 +205,7 @@ type parallelUnion struct { parallelType interface{} file *parallelPortFile device *parallelPortDevice + auto *parallelPortAuto } type parallelPortFile struct { filename string @@ -136,18 +214,30 @@ type parallelPortDevice struct { bidirectional string devicename string } +type parallelPortAuto struct { + bidirectional string +} func unformat_parallel(config string) (*parallelUnion,error) { - comptype := strings.SplitN(config, ":", 2) - if len(comptype) < 1 { + input := strings.SplitN(config, ":", 2) + if len(input) < 1 { return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) } - switch 
strings.ToUpper(comptype[0]) { + + var formatType, formatOptions string + formatType = input[0] + if len(input) == 2 { + formatOptions = input[1] + } else { + formatOptions = "" + } + + switch strings.ToUpper(formatType) { case "FILE": - res := ¶llelPortFile{ filename: comptype[1] } + res := ¶llelPortFile{ filename: filepath.FromSlash(formatOptions) } return ¶llelUnion{ parallelType:res, file: res},nil case "DEVICE": - comp := strings.Split(comptype[1], ",") + comp := strings.Split(formatOptions, ",") if len(comp) < 1 || len(comp) > 2 { return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) } @@ -156,15 +246,24 @@ func unformat_parallel(config string) (*parallelUnion,error) { res.devicename = strings.ToUpper(comp[0]) if len(comp) > 1 { switch strings.ToUpper(comp[1]) { - case "BI": - res.bidirectional = "TRUE" - case "UNI": - res.bidirectional = "FALSE" + case "BI": res.bidirectional = "TRUE" + case "UNI": res.bidirectional = "FALSE" default: return nil,fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(comp[0]), config) } } - return ¶llelUnion{ parallelType:res, device: res},nil + return ¶llelUnion{ parallelType:res, device:res },nil + + case "AUTO": + res := new(parallelPortAuto) + switch strings.ToUpper(formatOptions) { + case "": fallthrough + case "UNI": res.bidirectional = "FALSE" + case "BI": res.bidirectional = "TRUE" + default: + return nil,fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(formatOptions), config) + } + return ¶llelUnion{ parallelType:res, auto:res },nil } return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) } @@ -249,7 +348,6 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist Version: config.Version, ISOPath: isoPath, - Network: config.Network, Sound_Present: map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.Sound)], Usb_Present: map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.USB)], @@ -257,10 +355,42 @@ func (s 
*stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist Parallel_Present: "FALSE", } - // store the network so that we can later figure out what ip address to bind to - state.Put("vmnetwork", config.Network) + /// Check the network type that the user specified + network := config.Network + driver := state.Get("driver").(vmwcommon.VmwareDriver) - // check if serial port has been configured + // read netmap config + pathNetmap := driver.NetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + err := fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + netmap,res := vmwcommon.ReadNetmapConfig(pathNetmap) + if res != nil { + err := fmt.Errorf("Unable to read netmap conf file: %s: %v", pathNetmap, res) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // try and convert the specified network to a device + device,err := netmap.NameIntoDevice(network) + + // success. so we know that it's an actual network type inside netmap.conf + if err == nil { + templateData.Network_Type = network + templateData.Network_Device = device + // we were unable to find the type, so assume it's a custom network device. 
+ } else { + templateData.Network_Type = "custom" + templateData.Network_Device = network + } + // store the network so that we can later figure out what ip address to bind to + state.Put("vmnetwork", network) + + /// check if serial port has been configured if config.Serial != "" { serial,err := unformat_serial(config.Serial) if err != nil { @@ -275,6 +405,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Serial_Yield = "" templateData.Serial_Endpoint = "" templateData.Serial_Host = "" + templateData.Serial_Auto = "FALSE" switch serial.serialType.(type) { case *serialConfigPipe: @@ -289,6 +420,12 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist case *serialConfigDevice: templateData.Serial_Type = "device" templateData.Serial_Filename = filepath.FromSlash(serial.device.devicename) + case *serialConfigAuto: + templateData.Serial_Type = "device" + templateData.Serial_Filename = filepath.FromSlash(serial.auto.devicename) + templateData.Serial_Yield = serial.auto.yield + templateData.Serial_Auto = "TRUE" + default: err := fmt.Errorf("Error procesing VMX template: %v", serial) state.Put("error", err) @@ -297,7 +434,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist } } - // check if parallel port has been configured + /// check if parallel port has been configured if config.Parallel != "" { parallel,err := unformat_parallel(config.Parallel) if err != nil { @@ -307,6 +444,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist return multistep.ActionHalt } + templateData.Parallel_Auto = "FALSE" switch parallel.parallelType.(type) { case *parallelPortFile: templateData.Parallel_Present = "TRUE" @@ -315,6 +453,10 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Parallel_Present = "TRUE" templateData.Parallel_Bidirectional = parallel.device.bidirectional 
templateData.Parallel_Filename = filepath.FromSlash(parallel.device.devicename) + case *parallelPortAuto: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Auto = "TRUE" + templateData.Parallel_Bidirectional = parallel.auto.bidirectional default: err := fmt.Errorf("Error procesing VMX template: %v", parallel) state.Put("error", err) @@ -325,7 +467,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist ctx.Data = &templateData - // render the .vmx template + /// render the .vmx template vmxContents, err := interpolate.Render(vmxTemplate, &ctx) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) @@ -383,7 +525,8 @@ ehci.pciSlotNumber = "34" ehci.present = "TRUE" ethernet0.addressType = "generated" ethernet0.bsdName = "en0" -ethernet0.connectionType = "{{ .Network }}" +ethernet0.connectionType = "{{ .Network_Type }}" +ethernet0.vnet = "{{ .Network_Device }}" ethernet0.displayName = "Ethernet" ethernet0.linkStatePropagation.enable = "FALSE" ethernet0.pciSlotNumber = "33" @@ -453,7 +596,7 @@ usb_xhci.present = "TRUE" serial0.present = "{{ .Serial_Present }}" serial0.startConnected = "{{ .Serial_Present }}" serial0.fileName = "{{ .Serial_Filename }}" -serial0.autodetect = "TRUE" +serial0.autodetect = "{{ .Serial_Auto }}" serial0.fileType = "{{ .Serial_Type }}" serial0.yieldOnMsrRead = "{{ .Serial_Yield }}" serial0.pipe.endPoint = "{{ .Serial_Endpoint }}" @@ -463,7 +606,7 @@ serial0.tryNoRxLoss = "{{ .Serial_Host }}" parallel0.present = "{{ .Parallel_Present }}" parallel0.startConnected = "{{ .Parallel_Present }}" parallel0.fileName = "{{ .Parallel_Filename }}" -parallel0.autodetect = "TRUE" +parallel0.autodetect = "{{ .Parallel_Auto }}" parallel0.bidirectional = "{{ .Parallel_Bidirectional }}" virtualHW.productCompatibility = "hosted" diff --git a/website/source/docs/builders/vmware-iso.html.md b/website/source/docs/builders/vmware-iso.html.md index 256bf63e4..44285a850 100644 --- 
a/website/source/docs/builders/vmware-iso.html.md +++ b/website/source/docs/builders/vmware-iso.html.md @@ -193,6 +193,11 @@ builder. URLs must point to the same file (same checksum). By default this is empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `network` (string) - This is the network type that the virtual machine will + be created with. This can be one of the generic values that map to a device + such as "hostonly", "nat", or "bridged". If the network is not one of these + values, then it is assumed to be a VMware network device. (VMnet0..x) + - `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` @@ -200,6 +205,19 @@ builder. the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. +- `parallel` (string) - This specifies a parallel port to add to the VM. It + has the format of `Type:option1,option2,...`. Type can be one of the + following values: "FILE", "DEVICE", or "AUTO". + + * `FILE:path` - Specifies the path to the local file to be used for the + parallel port. + * `DEVICE:path` - Specifies the path to the local device to be used for the + parallel port. + * `AUTO:direction` - Specifies to use auto-detection to determine the + parallel port. Direction can be `BI` to specify + bidirectional communication or `UNI` to specify + unidirectional communication. + - `remote_cache_datastore` (string) - The path to the datastore where supporting files will be stored during the build on the remote machine. By default this is the same as the `remote_datastore` option. This only has an @@ -234,6 +252,40 @@ builder. - `remote_username` (string) - The username for the SSH user that will access the remote machine. This is required if `remote_type` is enabled. 
+- `serial` (string) - This specifies a serial port to add to the VM. + It has a format of `Type:option1,option2,...`. The field `Type` can be one + of the following values: `FILE`, `DEVICE`, `PIPE`, or `AUTO`. + + * `FILE:path(,yield)` - Specifies the path to the local file to be used as the + serial port. + * `yield` (bool) - This is an optional boolean that specifies whether + the vm should yield the cpu when polling the port. + By default, the builder will assume this as `FALSE`. + * `DEVICE:path(,yield)` - Specifies the path to the local device to be used + as the serial port. If `path` is empty, then + default to the first serial port. + * `yield` (bool) - This is an optional boolean that specifies whether + the vm should yield the cpu when polling the port. + By default, the builder will assume this as `FALSE`. + * `PIPE:path,endpoint,host(,yield)` - Specifies to use the named-pipe "path" + as a serial port. This has a few + options that determine how the VM + should use the named-pipe. + * `endpoint` (string) - Chooses the type of the VM-end, which can be + either a `client` or `server`. + * `host` (string) - Chooses the type of the host-end, which can be either + an `app` (application) or `vm` (another virtual-machine). + * `yield` (bool) - This is an optional boolean that specifies whether + the vm should yield the cpu when polling the port. + By default, the builder will assume this as `FALSE`. + + * `AUTO:(yield)` - Specifies to use auto-detection to determine the serial + port to use. This has one option to determine how the VM + should support the serial port. + * `yield` (bool) - This is an optional boolean that specifies whether + the vm should yield the cpu when polling the port. + By default, the builder will assume this as `FALSE`. + - `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. 
By default this is an empty string, which tells Packer to just forcefully shut down the machine. @@ -264,6 +316,8 @@ builder. `--noSSLVerify`, `--skipManifestCheck`, and `--targetType` are reserved, and should not be passed to this argument. +- `sound` (boolean) - Enable VMware's virtual soundcard device for the VM. + - `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload into the VM. Valid values are "darwin", "linux", and "windows". By default, this is empty, which means VMware tools won't be uploaded. @@ -276,6 +330,8 @@ builder. By default the upload path is set to `{{.Flavor}}.iso`. This setting is not used when `remote_type` is "esx5". +- `usb` (boolean) - Enable VMware's USB bus for the VM. + - `version` (string) - The [vmx hardware version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) for the new virtual machine. Only the default value has been tested, any From 884af69da14c132a378a6fb3cc10f9501fdbf03b Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Mon, 27 Feb 2017 15:34:53 -0600 Subject: [PATCH 163/251] go fmt on builder/vmware/* --- builder/vmware/common/driver.go | 158 ++- builder/vmware/common/driver_mock.go | 2 +- builder/vmware/common/driver_parser.go | 1122 +++++++++-------- builder/vmware/common/driver_player_unix.go | 2 +- builder/vmware/common/driver_workstation10.go | 1 - builder/vmware/iso/builder.go | 29 +- builder/vmware/iso/step_create_vmx.go | 360 +++--- builder/vmware/iso/step_create_vmx_test.go | 234 ++-- 8 files changed, 1075 insertions(+), 833 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 0f0bc1fe8..745b48d69 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -1,19 +1,19 @@ package common import ( - "errors" "bytes" + "errors" "fmt" + "io/ioutil" "log" + "net" "os" "os/exec" - "io/ioutil" "regexp" "runtime" "strconv" "strings" "time" - "net" 
"github.com/hashicorp/packer/helper/multistep" ) @@ -214,23 +214,27 @@ func compareVersions(versionFound string, versionWanted string, product string) /// helper functions that read configuration information from a file // read the network<->device configuration out of the specified path -func ReadNetmapConfig(path string) (NetworkMap,error) { - fd,err := os.Open(path) - if err != nil { return nil, err } +func ReadNetmapConfig(path string) (NetworkMap, error) { + fd, err := os.Open(path) + if err != nil { + return nil, err + } defer fd.Close() return ReadNetworkMap(fd) } // read the dhcp configuration out of the specified path -func ReadDhcpConfig(path string) (DhcpConfiguration,error) { - fd,err := os.Open(path) - if err != nil { return nil, err } +func ReadDhcpConfig(path string) (DhcpConfiguration, error) { + fd, err := os.Open(path) + if err != nil { + return nil, err + } defer fd.Close() return ReadDhcpConfiguration(fd) } // read the VMX configuration from the specified path -func readVMXConfig(path string) (map[string]string,error) { +func readVMXConfig(path string) (map[string]string, error) { f, err := os.Open(path) if err != nil { return map[string]string{}, err @@ -245,7 +249,7 @@ func readVMXConfig(path string) (map[string]string,error) { } // read the connection type out of a vmx configuration -func readCustomDeviceName(vmxData map[string]string) (string,error) { +func readCustomDeviceName(vmxData map[string]string) (string, error) { connectionType, ok := vmxData["ethernet0.connectiontype"] if !ok || connectionType != "custom" { @@ -266,18 +270,20 @@ type VmwareDriver struct { /// A driver must overload these in order to point to the correct /// files so that the address detection (ip and ethernet) machinery /// works. 
- DhcpLeasesPath func(string) string - DhcpConfPath func(string) string + DhcpLeasesPath func(string) string + DhcpConfPath func(string) string VmnetnatConfPath func(string) string - NetmapConfPath func() string + NetmapConfPath func() string } -func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string,error) { +func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string, error) { vmxPath := state.Get("vmx_path").(string) log.Println("Lookup up IP information...") vmxData, err := readVMXConfig(vmxPath) - if err != nil { return "", err } + if err != nil { + return "", err + } var ok bool macAddress := "" @@ -287,40 +293,50 @@ func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string,error) { } } - res,err := net.ParseMAC(macAddress) - if err != nil { return "", err } + res, err := net.ParseMAC(macAddress) + if err != nil { + return "", err + } - return res.String(),nil + return res.String(), nil } -func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string,error) { +func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string, error) { // read netmap config pathNetmap := d.NetmapConfPath() if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := ReadNetmapConfig(pathNetmap) - if err != nil { return "",err } + netmap, err := ReadNetmapConfig(pathNetmap) + if err != nil { + return "", err + } // convert the stashed network to a device network := state.Get("vmnetwork").(string) - device,err := netmap.NameIntoDevice(network) + device, err := netmap.NameIntoDevice(network) // we were unable to find the device, maybe it's a custom one... 
// so, check to see if it's in the .vmx configuration if err != nil || network == "custom" { vmxPath := state.Get("vmx_path").(string) vmxData, err := readVMXConfig(vmxPath) - if err != nil { return "", err } + if err != nil { + return "", err + } device, err = readCustomDeviceName(vmxData) - if err != nil { return "", err } + if err != nil { + return "", err + } } // figure out our MAC address for looking up the guest address - MACAddress,err := d.GuestAddress(state) - if err != nil { return "", err } + MACAddress, err := d.GuestAddress(state) + if err != nil { + return "", err + } // figure out the correct dhcp leases dhcpLeasesPath := d.DhcpLeasesPath(device) @@ -382,29 +398,35 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string,error) { return curIp, nil } -func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { +func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string, error) { // parse network<->device mapping pathNetmap := d.NetmapConfPath() if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := ReadNetmapConfig(pathNetmap) - if err != nil { return "",err } + netmap, err := ReadNetmapConfig(pathNetmap) + if err != nil { + return "", err + } // convert network to name network := state.Get("vmnetwork").(string) - device,err := netmap.NameIntoDevice(network) + device, err := netmap.NameIntoDevice(network) // we were unable to find the device, maybe it's a custom one... 
// so, check to see if it's in the .vmx configuration if err != nil || network == "custom" { vmxPath := state.Get("vmx_path").(string) vmxData, err := readVMXConfig(vmxPath) - if err != nil { return "", err } + if err != nil { + return "", err + } device, err = readCustomDeviceName(vmxData) - if err != nil { return "", err } + if err != nil { + return "", err + } } // parse dhcpd configuration @@ -413,54 +435,68 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string,error) { return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) } - config,err := ReadDhcpConfig(pathDhcpConfig) - if err != nil { return "",err } + config, err := ReadDhcpConfig(pathDhcpConfig) + if err != nil { + return "", err + } // find the entry configured in the dhcpd - interfaceConfig,err := config.HostByName(device) - if err != nil { return "", err } + interfaceConfig, err := config.HostByName(device) + if err != nil { + return "", err + } // finally grab the hardware address - address,err := interfaceConfig.Hardware() - if err == nil { return address.String(), nil } + address, err := interfaceConfig.Hardware() + if err == nil { + return address.String(), nil + } // we didn't find it, so search through our interfaces for the device name - interfaceList,err := net.Interfaces() - if err == nil { return "", err } + interfaceList, err := net.Interfaces() + if err == nil { + return "", err + } names := make([]string, 0) - for _,intf := range interfaceList { - if strings.HasSuffix( strings.ToLower(intf.Name), device ) { - return intf.HardwareAddr.String(),nil + for _, intf := range interfaceList { + if strings.HasSuffix(strings.ToLower(intf.Name), device) { + return intf.HardwareAddr.String(), nil } names = append(names, intf.Name) } - return "",fmt.Errorf("Unable to find device %s : %v", device, names) + return "", fmt.Errorf("Unable to find device %s : %v", device, names) } -func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { +func (d 
*VmwareDriver) HostIP(state multistep.StateBag) (string, error) { // parse network<->device mapping pathNetmap := d.NetmapConfPath() if _, err := os.Stat(pathNetmap); err != nil { return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) } - netmap,err := ReadNetmapConfig(pathNetmap) - if err != nil { return "",err } + netmap, err := ReadNetmapConfig(pathNetmap) + if err != nil { + return "", err + } // convert network to name network := state.Get("vmnetwork").(string) - device,err := netmap.NameIntoDevice(network) + device, err := netmap.NameIntoDevice(network) // we were unable to find the device, maybe it's a custom one... // so, check to see if it's in the .vmx configuration if err != nil || network == "custom" { vmxPath := state.Get("vmx_path").(string) vmxData, err := readVMXConfig(vmxPath) - if err != nil { return "", err } + if err != nil { + return "", err + } device, err = readCustomDeviceName(vmxData) - if err != nil { return "", err } + if err != nil { + return "", err + } } // parse dhcpd configuration @@ -468,15 +504,21 @@ func (d *VmwareDriver) HostIP(state multistep.StateBag) (string,error) { if _, err := os.Stat(pathDhcpConfig); err != nil { return "", fmt.Errorf("Could not find vmnetdhcp conf file: %s", pathDhcpConfig) } - config,err := ReadDhcpConfig(pathDhcpConfig) - if err != nil { return "",err } + config, err := ReadDhcpConfig(pathDhcpConfig) + if err != nil { + return "", err + } // find the entry configured in the dhcpd - interfaceConfig,err := config.HostByName(device) - if err != nil { return "", err } + interfaceConfig, err := config.HostByName(device) + if err != nil { + return "", err + } - address,err := interfaceConfig.IP4() - if err != nil { return "", err } + address, err := interfaceConfig.IP4() + if err != nil { + return "", err + } - return address.String(),nil + return address.String(), nil } diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index da28cd474..562eab382 100644 
--- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -83,7 +83,7 @@ type DriverMock struct { VmnetnatConfPathCalled bool VmnetnatConfPathResult string - + NetmapConfPathCalled bool NetmapConfPathResult string diff --git a/builder/vmware/common/driver_parser.go b/builder/vmware/common/driver_parser.go index 7071e424d..dbf15b5b5 100644 --- a/builder/vmware/common/driver_parser.go +++ b/builder/vmware/common/driver_parser.go @@ -1,12 +1,13 @@ package common + import ( "fmt" - "os" "io" - "strings" - "strconv" "net" + "os" "sort" + "strconv" + "strings" ) type sentinelSignaller chan struct{} @@ -20,16 +21,20 @@ func uncomment(eof sentinelSignaller, in <-chan byte) chan byte { var endofline bool for stillReading := true; stillReading; { select { - case <-eof: - stillReading = false - case ch := <-in: - switch ch { - case '#': - endofline = true - case '\n': - if endofline { endofline = false } + case <-eof: + stillReading = false + case ch := <-in: + switch ch { + case '#': + endofline = true + case '\n': + if endofline { + endofline = false } - if !endofline { out <- ch } + } + if !endofline { + out <- ch + } } } }(in, out) @@ -46,62 +51,71 @@ func tokenizeDhcpConfig(eof sentinelSignaller, in chan byte) chan string { go func(out chan string) { for stillReading := true; stillReading; { select { - case <-eof: - stillReading = false + case <-eof: + stillReading = false - case ch = <-in: - if quote { - if ch == '"' { - out <- state + string(ch) - state,quote = "",false - continue - } - state += string(ch) + case ch = <-in: + if quote { + if ch == '"' { + out <- state + string(ch) + state, quote = "", false continue } + state += string(ch) + continue + } - switch ch { - case '"': - quote = true - state += string(ch) - continue + switch ch { + case '"': + quote = true + state += string(ch) + continue - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - if len(state) == 0 { continue } - out <- state 
- state = "" - - case '{': fallthrough - case '}': fallthrough - case ';': - if len(state) > 0 { out <- state } - out <- string(ch) - state = "" - - default: - state += string(ch) + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + if len(state) == 0 { + continue } + out <- state + state = "" + + case '{': + fallthrough + case '}': + fallthrough + case ';': + if len(state) > 0 { + out <- state + } + out <- string(ch) + state = "" + + default: + state += string(ch) + } } } - if len(state) > 0 { out <- state } + if len(state) > 0 { + out <- state + } }(out) return out } /** mid-level parsing */ type tkParameter struct { - name string + name string operand []string } + func (e *tkParameter) String() string { var values []string - for _,val := range e.operand { + for _, val := range e.operand { values = append(values, val) } return fmt.Sprintf("%s [%s]", e.name, strings.Join(values, ",")) @@ -109,21 +123,22 @@ func (e *tkParameter) String() string { type tkGroup struct { parent *tkGroup - id tkParameter + id tkParameter groups []*tkGroup params []tkParameter } + func (e *tkGroup) String() string { var id []string id = append(id, e.id.name) - for _,val := range e.id.operand { + for _, val := range e.id.operand { id = append(id, val) } var config []string - for _,val := range e.params { + for _, val := range e.params { config = append(config, val.String()) } return fmt.Sprintf("%s {\n%s\n}", strings.Join(id, " "), strings.Join(config, "\n")) @@ -139,11 +154,14 @@ func parseTokenParameter(in chan string) tkParameter { continue } switch token { - case "{": fallthrough - case "}": fallthrough - case ";": goto leave - default: - result.operand = append(result.operand, token) + case "{": + fallthrough + case "}": + fallthrough + case ";": + goto leave + default: + result.operand = append(result.operand, token) } } leave: @@ -151,50 +169,52 @@ leave: } // convert a channel of pseudo-tokens into an tkGroup tree */ -func 
parseDhcpConfig(eof sentinelSignaller, in chan string) (tkGroup,error) { +func parseDhcpConfig(eof sentinelSignaller, in chan string) (tkGroup, error) { var tokens []string var result tkGroup toParameter := func(tokens []string) tkParameter { out := make(chan string) - go func(out chan string){ - for _,v := range tokens { out <- v } + go func(out chan string) { + for _, v := range tokens { + out <- v + } out <- ";" }(out) return parseTokenParameter(out) } - for stillReading,currentgroup := true,&result; stillReading; { + for stillReading, currentgroup := true, &result; stillReading; { select { - case <-eof: - stillReading = false + case <-eof: + stillReading = false - case tk := <-in: - switch tk { - case "{": - grp := &tkGroup{parent:currentgroup} - grp.id = toParameter(tokens) - currentgroup.groups = append(currentgroup.groups, grp) - currentgroup = grp - case "}": - if currentgroup.parent == nil { - return tkGroup{}, fmt.Errorf("Unable to close the global declaration") - } - if len(tokens) > 0 { - return tkGroup{}, fmt.Errorf("List of tokens was left unterminated : %v", tokens) - } - currentgroup = currentgroup.parent - case ";": - arg := toParameter(tokens) - currentgroup.params = append(currentgroup.params, arg) - default: - tokens = append(tokens, tk) - continue + case tk := <-in: + switch tk { + case "{": + grp := &tkGroup{parent: currentgroup} + grp.id = toParameter(tokens) + currentgroup.groups = append(currentgroup.groups, grp) + currentgroup = grp + case "}": + if currentgroup.parent == nil { + return tkGroup{}, fmt.Errorf("Unable to close the global declaration") } - tokens = []string{} + if len(tokens) > 0 { + return tkGroup{}, fmt.Errorf("List of tokens was left unterminated : %v", tokens) + } + currentgroup = currentgroup.parent + case ";": + arg := toParameter(tokens) + currentgroup.params = append(currentgroup.params, arg) + default: + tokens = append(tokens, tk) + continue + } + tokens = []string{} } } - return result,nil + return result, nil } 
func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) chan string { @@ -207,70 +227,85 @@ func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) chan string { go func(out chan string) { for stillReading := true; stillReading; { select { - case <-eof: - stillReading = false + case <-eof: + stillReading = false - case ch = <-in: - if quote { - if ch == '"' { - out <- state + string(ch) - state,quote = "",false - continue - } - state += string(ch) + case ch = <-in: + if quote { + if ch == '"' { + out <- state + string(ch) + state, quote = "", false continue } + state += string(ch) + continue + } - switch ch { - case '"': - quote = true - state += string(ch) - continue + switch ch { + case '"': + quote = true + state += string(ch) + continue - case '\r': - fallthrough - case '\t': - fallthrough - case ' ': - if len(state) == 0 { continue } - out <- state - state = "" - - case '\n': - if lastnewline { continue } - if len(state) > 0 { out <- state } - out <- string(ch) - state = "" - lastnewline = true - continue - - case '.': fallthrough - case '=': - if len(state) > 0 { out <- state } - out <- string(ch) - state = "" - - default: - state += string(ch) + case '\r': + fallthrough + case '\t': + fallthrough + case ' ': + if len(state) == 0 { + continue } - lastnewline = false + out <- state + state = "" + + case '\n': + if lastnewline { + continue + } + if len(state) > 0 { + out <- state + } + out <- string(ch) + state = "" + lastnewline = true + continue + + case '.': + fallthrough + case '=': + if len(state) > 0 { + out <- state + } + out <- string(ch) + state = "" + + default: + state += string(ch) + } + lastnewline = false } } - if len(state) > 0 { out <- state } + if len(state) > 0 { + out <- state + } }(out) return out } -func parseNetworkMapConfig(eof sentinelSignaller, in chan string) (NetworkMap,error) { +func parseNetworkMapConfig(eof sentinelSignaller, in chan string) (NetworkMap, error) { var unsorted map[string]map[string]string var 
state []string addResult := func(network string, attribute string, value string) error { - _,ok := unsorted[network] - if !ok { unsorted[network] = make(map[string]string) } + _, ok := unsorted[network] + if !ok { + unsorted[network] = make(map[string]string) + } - val,err := strconv.Unquote(value) - if err != nil { return err } + val, err := strconv.Unquote(value) + if err != nil { + return err + } current := unsorted[network] current[attribute] = val @@ -280,135 +315,174 @@ func parseNetworkMapConfig(eof sentinelSignaller, in chan string) (NetworkMap,er stillReading := true for unsorted = make(map[string]map[string]string); stillReading; { select { - case <-eof: - if len(state) == 3 { - err := addResult(state[0], state[1], state[2]) - if err != nil { return nil,err } + case <-eof: + if len(state) == 3 { + err := addResult(state[0], state[1], state[2]) + if err != nil { + return nil, err } - stillReading = false - case tk := <-in: - switch tk { - case ".": - if len(state) != 1 { return nil,fmt.Errorf("Missing network index") } - case "=": - if len(state) != 2 { return nil,fmt.Errorf("Assignment to empty attribute") } - case "\n": - if len(state) == 0 { continue } - if len(state) != 3 { return nil,fmt.Errorf("Invalid attribute assignment : %v", state) } - err := addResult(state[0], state[1], state[2]) - if err != nil { return nil,err } - state = make([]string, 0) - default: - state = append(state, tk) + } + stillReading = false + case tk := <-in: + switch tk { + case ".": + if len(state) != 1 { + return nil, fmt.Errorf("Missing network index") } + case "=": + if len(state) != 2 { + return nil, fmt.Errorf("Assignment to empty attribute") + } + case "\n": + if len(state) == 0 { + continue + } + if len(state) != 3 { + return nil, fmt.Errorf("Invalid attribute assignment : %v", state) + } + err := addResult(state[0], state[1], state[2]) + if err != nil { + return nil, err + } + state = make([]string, 0) + default: + state = append(state, tk) + } } } result := 
make([]map[string]string, 0) var keys []string - for k := range unsorted { keys = append(keys, k) } + for k := range unsorted { + keys = append(keys, k) + } sort.Strings(keys) - for _,k := range keys { + for _, k := range keys { result = append(result, unsorted[k]) } - return result,nil + return result, nil } /** higher-level parsing */ /// parameters -type pParameter interface { repr() string } +type pParameter interface { + repr() string +} type pParameterInclude struct { filename string } -func (e pParameterInclude) repr() string { return fmt.Sprintf("include-file:filename=%s",e.filename) } + +func (e pParameterInclude) repr() string { return fmt.Sprintf("include-file:filename=%s", e.filename) } type pParameterOption struct { - name string + name string value string } -func (e pParameterOption) repr() string { return fmt.Sprintf("option:%s=%s",e.name,e.value) } + +func (e pParameterOption) repr() string { return fmt.Sprintf("option:%s=%s", e.name, e.value) } // allow some-kind-of-something type pParameterGrant struct { - verb string // allow,deny,ignore + verb string // allow,deny,ignore attribute string } -func (e pParameterGrant) repr() string { return fmt.Sprintf("grant:%s,%s",e.verb,e.attribute) } + +func (e pParameterGrant) repr() string { return fmt.Sprintf("grant:%s,%s", e.verb, e.attribute) } type pParameterAddress4 []string + func (e pParameterAddress4) repr() string { - return fmt.Sprintf("fixed-address4:%s",strings.Join(e,",")) + return fmt.Sprintf("fixed-address4:%s", strings.Join(e, ",")) } type pParameterAddress6 []string + func (e pParameterAddress6) repr() string { - return fmt.Sprintf("fixed-address6:%s",strings.Join(e,",")) + return fmt.Sprintf("fixed-address6:%s", strings.Join(e, ",")) } // hardware address 00:00:00:00:00:00 type pParameterHardware struct { - class string + class string address []byte } + func (e pParameterHardware) repr() string { res := make([]string, 0) - for _,v := range e.address { - res = append(res, 
fmt.Sprintf("%02x",v)) + for _, v := range e.address { + res = append(res, fmt.Sprintf("%02x", v)) } - return fmt.Sprintf("hardware-address:%s[%s]",e.class,strings.Join(res,":")) + return fmt.Sprintf("hardware-address:%s[%s]", e.class, strings.Join(res, ":")) } type pParameterBoolean struct { parameter string - truancy bool + truancy bool } -func (e pParameterBoolean) repr() string { return fmt.Sprintf("boolean:%s=%v",e.parameter,e.truancy) } + +func (e pParameterBoolean) repr() string { return fmt.Sprintf("boolean:%s=%v", e.parameter, e.truancy) } type pParameterClientMatch struct { name string data string } -func (e pParameterClientMatch) repr() string { return fmt.Sprintf("match-client:%s=%s",e.name,e.data) } + +func (e pParameterClientMatch) repr() string { return fmt.Sprintf("match-client:%s=%s", e.name, e.data) } // range 127.0.0.1 127.0.0.255 type pParameterRange4 struct { min net.IP max net.IP } -func (e pParameterRange4) repr() string { return fmt.Sprintf("range4:%s-%s",e.min.String(),e.max.String()) } + +func (e pParameterRange4) repr() string { + return fmt.Sprintf("range4:%s-%s", e.min.String(), e.max.String()) +} type pParameterRange6 struct { min net.IP max net.IP } -func (e pParameterRange6) repr() string { return fmt.Sprintf("range6:%s-%s",e.min.String(),e.max.String()) } + +func (e pParameterRange6) repr() string { + return fmt.Sprintf("range6:%s-%s", e.min.String(), e.max.String()) +} type pParameterPrefix6 struct { - min net.IP - max net.IP + min net.IP + max net.IP bits int } -func (e pParameterPrefix6) repr() string { return fmt.Sprintf("prefix6:/%d:%s-%s",e.bits,e.min.String(),e.max.String()) } + +func (e pParameterPrefix6) repr() string { + return fmt.Sprintf("prefix6:/%d:%s-%s", e.bits, e.min.String(), e.max.String()) +} // some-kind-of-parameter 1024 type pParameterOther struct { parameter string - value string + value string } -func (e pParameterOther) repr() string { return fmt.Sprintf("parameter:%s=%s",e.parameter,e.value) } + +func (e 
pParameterOther) repr() string { return fmt.Sprintf("parameter:%s=%s", e.parameter, e.value) } type pParameterExpression struct { - parameter string + parameter string expression string } -func (e pParameterExpression) repr() string { return fmt.Sprintf("parameter-expression:%s=\"%s\"",e.parameter,e.expression) } -type pDeclarationIdentifier interface { repr() string } +func (e pParameterExpression) repr() string { + return fmt.Sprintf("parameter-expression:%s=\"%s\"", e.parameter, e.expression) +} + +type pDeclarationIdentifier interface { + repr() string +} type pDeclaration struct { - id pDeclarationIdentifier - parent *pDeclaration - parameters []pParameter + id pDeclarationIdentifier + parent *pDeclaration + parameters []pParameter declarations []pDeclaration } @@ -420,277 +494,305 @@ func (e *pDeclaration) repr() string { res := e.short() var parameters []string - for _,v := range e.parameters { + for _, v := range e.parameters { parameters = append(parameters, v.repr()) } var groups []string - for _,v := range e.declarations { - groups = append(groups, fmt.Sprintf("-> %s",v.short())) + for _, v := range e.declarations { + groups = append(groups, fmt.Sprintf("-> %s", v.short())) } if e.parent != nil { - res = fmt.Sprintf("%s parent:%s",res,e.parent.short()) + res = fmt.Sprintf("%s parent:%s", res, e.parent.short()) } - return fmt.Sprintf("%s\n%s\n%s\n", res, strings.Join(parameters,"\n"), strings.Join(groups,"\n")) + return fmt.Sprintf("%s\n%s\n%s\n", res, strings.Join(parameters, "\n"), strings.Join(groups, "\n")) } -type pDeclarationGlobal struct {} +type pDeclarationGlobal struct{} + func (e pDeclarationGlobal) repr() string { return fmt.Sprintf("{global}") } -type pDeclarationShared struct { name string } +type pDeclarationShared struct{ name string } + func (e pDeclarationShared) repr() string { return fmt.Sprintf("{shared-network %s}", e.name) } -type pDeclarationSubnet4 struct { net.IPNet } +type pDeclarationSubnet4 struct{ net.IPNet } + func (e 
pDeclarationSubnet4) repr() string { return fmt.Sprintf("{subnet4 %s}", e.String()) } -type pDeclarationSubnet6 struct { net.IPNet } +type pDeclarationSubnet6 struct{ net.IPNet } + func (e pDeclarationSubnet6) repr() string { return fmt.Sprintf("{subnet6 %s}", e.String()) } -type pDeclarationHost struct { name string } +type pDeclarationHost struct{ name string } + func (e pDeclarationHost) repr() string { return fmt.Sprintf("{host name:%s}", e.name) } -type pDeclarationPool struct {} +type pDeclarationPool struct{} + func (e pDeclarationPool) repr() string { return fmt.Sprintf("{pool}") } -type pDeclarationGroup struct {} +type pDeclarationGroup struct{} + func (e pDeclarationGroup) repr() string { return fmt.Sprintf("{group}") } -type pDeclarationClass struct { name string } +type pDeclarationClass struct{ name string } + func (e pDeclarationClass) repr() string { return fmt.Sprintf("{class}") } /** parsers */ -func parseParameter(val tkParameter) (pParameter,error) { +func parseParameter(val tkParameter) (pParameter, error) { switch val.name { - case "include": - if len(val.operand) != 2 { - return nil,fmt.Errorf("Invalid number of parameters for pParameterInclude : %v",val.operand) - } - name := val.operand[0] - return pParameterInclude{filename: name},nil + case "include": + if len(val.operand) != 2 { + return nil, fmt.Errorf("Invalid number of parameters for pParameterInclude : %v", val.operand) + } + name := val.operand[0] + return pParameterInclude{filename: name}, nil - case "option": - if len(val.operand) != 2 { - return nil,fmt.Errorf("Invalid number of parameters for pParameterOption : %v",val.operand) - } - name, value := val.operand[0], val.operand[1] - return pParameterOption{name: name, value: value},nil + case "option": + if len(val.operand) != 2 { + return nil, fmt.Errorf("Invalid number of parameters for pParameterOption : %v", val.operand) + } + name, value := val.operand[0], val.operand[1] + return pParameterOption{name: name, value: value}, 
nil - case "allow": fallthrough - case "deny": fallthrough - case "ignore": - if len(val.operand) < 1 { - return nil,fmt.Errorf("Invalid number of parameters for pParameterGrant : %v",val.operand) - } - attribute := strings.Join(val.operand," ") - return pParameterGrant{verb: strings.ToLower(val.name), attribute: attribute},nil + case "allow": + fallthrough + case "deny": + fallthrough + case "ignore": + if len(val.operand) < 1 { + return nil, fmt.Errorf("Invalid number of parameters for pParameterGrant : %v", val.operand) + } + attribute := strings.Join(val.operand, " ") + return pParameterGrant{verb: strings.ToLower(val.name), attribute: attribute}, nil - case "range": - if len(val.operand) < 1 { - return nil,fmt.Errorf("Invalid number of parameters for pParameterRange4 : %v",val.operand) - } - idxAddress := map[bool]int{true:1,false:0}[strings.ToLower(val.operand[0]) == "bootp"] - if len(val.operand) > 2 + idxAddress { - return nil,fmt.Errorf("Invalid number of parameters for pParameterRange : %v",val.operand) - } - if idxAddress + 1 > len(val.operand) { - res := net.ParseIP(val.operand[idxAddress]) - return pParameterRange4{min: res, max: res},nil - } - addr1 := net.ParseIP(val.operand[idxAddress]) - addr2 := net.ParseIP(val.operand[idxAddress+1]) - return pParameterRange4{min: addr1, max: addr2},nil + case "range": + if len(val.operand) < 1 { + return nil, fmt.Errorf("Invalid number of parameters for pParameterRange4 : %v", val.operand) + } + idxAddress := map[bool]int{true: 1, false: 0}[strings.ToLower(val.operand[0]) == "bootp"] + if len(val.operand) > 2+idxAddress { + return nil, fmt.Errorf("Invalid number of parameters for pParameterRange : %v", val.operand) + } + if idxAddress+1 > len(val.operand) { + res := net.ParseIP(val.operand[idxAddress]) + return pParameterRange4{min: res, max: res}, nil + } + addr1 := net.ParseIP(val.operand[idxAddress]) + addr2 := net.ParseIP(val.operand[idxAddress+1]) + return pParameterRange4{min: addr1, max: addr2}, nil - case 
"range6": - if len(val.operand) == 1 { - address := val.operand[0] - if (strings.Contains(address, "/")) { - cidr := strings.SplitN(address, "/", 2) - if len(cidr) != 2 { return nil,fmt.Errorf("Unknown ipv6 format : %v", address) } - address := net.ParseIP(cidr[0]) - bits,err := strconv.Atoi(cidr[1]) - if err != nil { return nil,err } - mask := net.CIDRMask(bits, net.IPv6len*8) - - // figure out the network address - network := address.Mask(mask) - - // make a broadcast address - broadcast := network - networkSize,totalSize := mask.Size() - hostSize := totalSize-networkSize - for i := networkSize / 8; i < totalSize / 8; i++ { - broadcast[i] = byte(0xff) - } - octetIndex := network[networkSize / 8] - bitsLeft := (uint32)(hostSize%8) - broadcast[octetIndex] = network[octetIndex] | ((1< 1 { + if val.operand[0] == "=" { + return pParameterExpression{parameter: val.name, expression: strings.Join(val.operand[1:], "")}, nil } - if val.operand[0] != "option" { - return nil,fmt.Errorf("Invalid match parameter : %v",val.operand[0]) - } - optionName := val.operand[1] - optionData := val.operand[2] - return pParameterClientMatch{name: optionName, data: optionData},nil - - default: - length := len(val.operand) - if length < 1 { - return pParameterBoolean{parameter: val.name, truancy: true},nil - } else if length > 1 { - if val.operand[0] == "=" { - return pParameterExpression{parameter: val.name, expression: strings.Join(val.operand[1:],"")},nil - } - } - if length != 1 { - return nil,fmt.Errorf("Invalid number of parameters for pParameterOther : %v",val.operand) - } - if strings.ToLower(val.name) == "not" { - return pParameterBoolean{parameter: val.operand[0], truancy: false},nil - } - return pParameterOther{parameter: val.name, value: val.operand[0]}, nil + } + if length != 1 { + return nil, fmt.Errorf("Invalid number of parameters for pParameterOther : %v", val.operand) + } + if strings.ToLower(val.name) == "not" { + return pParameterBoolean{parameter: val.operand[0], 
truancy: false}, nil + } + return pParameterOther{parameter: val.name, value: val.operand[0]}, nil } } -func parseTokenGroup(val tkGroup) (*pDeclaration,error) { +func parseTokenGroup(val tkGroup) (*pDeclaration, error) { params := val.id.operand switch val.id.name { - case "group": - return &pDeclaration{id:pDeclarationGroup{}},nil + case "group": + return &pDeclaration{id: pDeclarationGroup{}}, nil - case "pool": - return &pDeclaration{id:pDeclarationPool{}},nil + case "pool": + return &pDeclaration{id: pDeclarationPool{}}, nil - case "host": - if len(params) == 1 { - return &pDeclaration{id:pDeclarationHost{name: params[0]}},nil - } + case "host": + if len(params) == 1 { + return &pDeclaration{id: pDeclarationHost{name: params[0]}}, nil + } - case "subnet": - if len(params) == 3 && strings.ToLower(params[1]) == "netmask" { - addr := make([]byte, 4) - for i,v := range strings.SplitN(params[2], ".", 4) { - res,err := strconv.ParseInt(v, 10, 0) - if err != nil { return nil,err } - addr[i] = byte(res) + case "subnet": + if len(params) == 3 && strings.ToLower(params[1]) == "netmask" { + addr := make([]byte, 4) + for i, v := range strings.SplitN(params[2], ".", 4) { + res, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return nil, err } - oc1,oc2,oc3,oc4 := addr[0],addr[1],addr[2],addr[3] - if subnet,mask := net.ParseIP(params[0]),net.IPv4Mask(oc1,oc2,oc3,oc4); subnet != nil && mask != nil { - return &pDeclaration{id:pDeclarationSubnet4{net.IPNet{IP:subnet,Mask:mask}}},nil + addr[i] = byte(res) + } + oc1, oc2, oc3, oc4 := addr[0], addr[1], addr[2], addr[3] + if subnet, mask := net.ParseIP(params[0]), net.IPv4Mask(oc1, oc2, oc3, oc4); subnet != nil && mask != nil { + return &pDeclaration{id: pDeclarationSubnet4{net.IPNet{IP: subnet, Mask: mask}}}, nil + } + } + case "subnet6": + if len(params) == 1 { + ip6 := strings.SplitN(params[0], "/", 2) + if len(ip6) == 2 && strings.Contains(ip6[0], ":") { + address := net.ParseIP(ip6[0]) + prefix, err := 
strconv.Atoi(ip6[1]) + if err != nil { + return nil, err } + return &pDeclaration{id: pDeclarationSubnet6{net.IPNet{IP: address, Mask: net.CIDRMask(prefix, net.IPv6len*8)}}}, nil } - case "subnet6": - if len(params) == 1 { - ip6 := strings.SplitN(params[0], "/", 2) - if len(ip6) == 2 && strings.Contains(ip6[0], ":") { - address := net.ParseIP(ip6[0]) - prefix,err := strconv.Atoi(ip6[1]) - if err != nil { return nil, err } - return &pDeclaration{id:pDeclarationSubnet6{net.IPNet{IP:address,Mask:net.CIDRMask(prefix, net.IPv6len*8)}}},nil - } - } - case "shared-network": - if len(params) == 1 { - return &pDeclaration{id:pDeclarationShared{name: params[0]}},nil - } - case "": - return &pDeclaration{id:pDeclarationGlobal{}},nil + } + case "shared-network": + if len(params) == 1 { + return &pDeclaration{id: pDeclarationShared{name: params[0]}}, nil + } + case "": + return &pDeclaration{id: pDeclarationGlobal{}}, nil } - return nil,fmt.Errorf("Invalid pDeclaration : %v : %v", val.id.name, params) + return nil, fmt.Errorf("Invalid pDeclaration : %v : %v", val.id.name, params) } -func flattenDhcpConfig(root tkGroup) (*pDeclaration,error) { +func flattenDhcpConfig(root tkGroup) (*pDeclaration, error) { var result *pDeclaration - result,err := parseTokenGroup(root) - if err != nil { return nil,err } + result, err := parseTokenGroup(root) + if err != nil { + return nil, err + } - for _,p := range root.params { - param,err := parseParameter(p) - if err != nil { return nil,err } + for _, p := range root.params { + param, err := parseParameter(p) + if err != nil { + return nil, err + } result.parameters = append(result.parameters, param) } - for _,p := range root.groups { - group,err := flattenDhcpConfig(*p) - if err != nil { return nil,err } + for _, p := range root.groups { + group, err := flattenDhcpConfig(*p) + if err != nil { + return nil, err + } group.parent = result result.declarations = append(result.declarations, *group) } - return result,nil + return result, nil } /** 
reduce the tree into the things that we care about */ type grant uint + const ( - ALLOW grant = iota + ALLOW grant = iota IGNORE grant = iota - DENY grant = iota + DENY grant = iota ) + type configDeclaration struct { - id []pDeclarationIdentifier + id []pDeclarationIdentifier composites []pDeclaration address []pParameter - options map[string]string - grants map[string]grant - attributes map[string]bool - parameters map[string]string + options map[string]string + grants map[string]grant + attributes map[string]bool + parameters map[string]string expressions map[string]string hostid []pParameterClientMatch @@ -715,28 +817,28 @@ func createDeclaration(node pDeclaration) configDeclaration { result.hostid = make([]pParameterClientMatch, 0) // walk from globals to pDeclaration collecting all parameters - for i := len(hierarchy)-1; i >= 0; i-- { - result.composites = append(result.composites, hierarchy[(len(hierarchy)-1) - i]) - result.id = append(result.id, hierarchy[(len(hierarchy)-1) - i].id) + for i := len(hierarchy) - 1; i >= 0; i-- { + result.composites = append(result.composites, hierarchy[(len(hierarchy)-1)-i]) + result.id = append(result.id, hierarchy[(len(hierarchy)-1)-i].id) // update configDeclaration parameters - for _,p := range hierarchy[i].parameters { + for _, p := range hierarchy[i].parameters { switch p.(type) { - case pParameterOption: - result.options[p.(pParameterOption).name] = p.(pParameterOption).value - case pParameterGrant: - Grant := map[string]grant{"ignore":IGNORE, "allow":ALLOW, "deny":DENY} - result.grants[p.(pParameterGrant).attribute] = Grant[p.(pParameterGrant).verb] - case pParameterBoolean: - result.attributes[p.(pParameterBoolean).parameter] = p.(pParameterBoolean).truancy - case pParameterClientMatch: - result.hostid = append(result.hostid, p.(pParameterClientMatch)) - case pParameterExpression: - result.expressions[p.(pParameterExpression).parameter] = p.(pParameterExpression).expression - case pParameterOther: - 
result.parameters[p.(pParameterOther).parameter] = p.(pParameterOther).value - default: - result.address = append(result.address, p) + case pParameterOption: + result.options[p.(pParameterOption).name] = p.(pParameterOption).value + case pParameterGrant: + Grant := map[string]grant{"ignore": IGNORE, "allow": ALLOW, "deny": DENY} + result.grants[p.(pParameterGrant).attribute] = Grant[p.(pParameterGrant).verb] + case pParameterBoolean: + result.attributes[p.(pParameterBoolean).parameter] = p.(pParameterBoolean).truancy + case pParameterClientMatch: + result.hostid = append(result.hostid, p.(pParameterClientMatch)) + case pParameterExpression: + result.expressions[p.(pParameterExpression).parameter] = p.(pParameterExpression).expression + case pParameterOther: + result.parameters[p.(pParameterOther).parameter] = p.(pParameterOther).value + default: + result.address = append(result.address, p) } } } @@ -749,112 +851,141 @@ func (e *configDeclaration) repr() string { var res []string res = make([]string, 0) - for _,v := range e.id { res = append(res, v.repr()) } + for _, v := range e.id { + res = append(res, v.repr()) + } result = append(result, strings.Join(res, ",")) if len(e.address) > 0 { res = make([]string, 0) - for _,v := range e.address { res = append(res, v.repr()) } + for _, v := range e.address { + res = append(res, v.repr()) + } result = append(result, fmt.Sprintf("address : %v", strings.Join(res, ","))) } - if len(e.options) > 0 { result = append(result, fmt.Sprintf("options : %v", e.options)) } - if len(e.grants) > 0 { result = append(result, fmt.Sprintf("grants : %v", e.grants)) } - if len(e.attributes) > 0 { result = append(result, fmt.Sprintf("attributes : %v", e.attributes)) } - if len(e.parameters) > 0 { result = append(result, fmt.Sprintf("parameters : %v", e.parameters)) } - if len(e.expressions) > 0 { result = append(result, fmt.Sprintf("parameter-expressions : %v", e.expressions)) } + if len(e.options) > 0 { + result = append(result, 
fmt.Sprintf("options : %v", e.options)) + } + if len(e.grants) > 0 { + result = append(result, fmt.Sprintf("grants : %v", e.grants)) + } + if len(e.attributes) > 0 { + result = append(result, fmt.Sprintf("attributes : %v", e.attributes)) + } + if len(e.parameters) > 0 { + result = append(result, fmt.Sprintf("parameters : %v", e.parameters)) + } + if len(e.expressions) > 0 { + result = append(result, fmt.Sprintf("parameter-expressions : %v", e.expressions)) + } if len(e.hostid) > 0 { res = make([]string, 0) - for _,v := range e.hostid { res = append(res, v.repr()) } + for _, v := range e.hostid { + res = append(res, v.repr()) + } result = append(result, fmt.Sprintf("hostid : %v", strings.Join(res, " "))) } return strings.Join(result, "\n") + "\n" } -func (e *configDeclaration) IP4() (net.IP,error) { +func (e *configDeclaration) IP4() (net.IP, error) { var result []string - for _,entry := range e.address { + for _, entry := range e.address { switch entry.(type) { - case pParameterAddress4: - for _,s := range entry.(pParameterAddress4) { - result = append(result, s) - } + case pParameterAddress4: + for _, s := range entry.(pParameterAddress4) { + result = append(result, s) + } } } if len(result) > 1 { - return nil,fmt.Errorf("More than one address4 returned : %v", result) + return nil, fmt.Errorf("More than one address4 returned : %v", result) } else if len(result) == 0 { - return nil,fmt.Errorf("No IP4 addresses found") + return nil, fmt.Errorf("No IP4 addresses found") } - if res := net.ParseIP(result[0]); res != nil { return res,nil } - res,err := net.ResolveIPAddr("ip4", result[0]) - if err != nil { return nil,err } - return res.IP,nil + if res := net.ParseIP(result[0]); res != nil { + return res, nil + } + res, err := net.ResolveIPAddr("ip4", result[0]) + if err != nil { + return nil, err + } + return res.IP, nil } -func (e *configDeclaration) IP6() (net.IP,error) { +func (e *configDeclaration) IP6() (net.IP, error) { var result []string - for _,entry := range 
e.address { + for _, entry := range e.address { switch entry.(type) { - case pParameterAddress6: - for _,s := range entry.(pParameterAddress6) { - result = append(result, s) - } + case pParameterAddress6: + for _, s := range entry.(pParameterAddress6) { + result = append(result, s) + } } } if len(result) > 1 { - return nil,fmt.Errorf("More than one address6 returned : %v", result) + return nil, fmt.Errorf("More than one address6 returned : %v", result) } else if len(result) == 0 { - return nil,fmt.Errorf("No IP6 addresses found") + return nil, fmt.Errorf("No IP6 addresses found") } - if res := net.ParseIP(result[0]); res != nil { return res,nil } - res,err := net.ResolveIPAddr("ip6", result[0]) - if err != nil { return nil,err } - return res.IP,nil + if res := net.ParseIP(result[0]); res != nil { + return res, nil + } + res, err := net.ResolveIPAddr("ip6", result[0]) + if err != nil { + return nil, err + } + return res.IP, nil } -func (e *configDeclaration) Hardware() (net.HardwareAddr,error) { +func (e *configDeclaration) Hardware() (net.HardwareAddr, error) { var result []pParameterHardware - for _,addr := range e.address { + for _, addr := range e.address { switch addr.(type) { - case pParameterHardware: - result = append(result, addr.(pParameterHardware)) + case pParameterHardware: + result = append(result, addr.(pParameterHardware)) } } if len(result) > 0 { - return nil,fmt.Errorf("More than one hardware address returned : %v", result) + return nil, fmt.Errorf("More than one hardware address returned : %v", result) } res := make(net.HardwareAddr, 0) - for _,by := range result[0].address { + for _, by := range result[0].address { res = append(res, by) } - return res,nil + return res, nil } /*** Dhcp Configuration */ type DhcpConfiguration []configDeclaration -func ReadDhcpConfiguration(fd *os.File) (DhcpConfiguration,error) { - fromfile,eof := consumeFile(fd) + +func ReadDhcpConfiguration(fd *os.File) (DhcpConfiguration, error) { + fromfile, eof := 
consumeFile(fd) uncommented := uncomment(eof, fromfile) tokenized := tokenizeDhcpConfig(eof, uncommented) - parsetree,err := parseDhcpConfig(eof, tokenized) - if err != nil { return nil,err } + parsetree, err := parseDhcpConfig(eof, tokenized) + if err != nil { + return nil, err + } - global,err := flattenDhcpConfig(parsetree) - if err != nil { return nil,err } + global, err := flattenDhcpConfig(parsetree) + if err != nil { + return nil, err + } - var walkDeclarations func(root pDeclaration, out chan*configDeclaration); - walkDeclarations = func(root pDeclaration, out chan*configDeclaration) { + var walkDeclarations func(root pDeclaration, out chan *configDeclaration) + walkDeclarations = func(root pDeclaration, out chan *configDeclaration) { res := createDeclaration(root) out <- &res - for _,p := range root.declarations { + for _, p := range root.declarations { walkDeclarations(p, out) } } - each := make(chan*configDeclaration) - go func(out chan*configDeclaration) { + each := make(chan *configDeclaration) + go func(out chan *configDeclaration) { walkDeclarations(*global, out) out <- nil }(each) @@ -863,7 +994,7 @@ func ReadDhcpConfiguration(fd *os.File) (DhcpConfiguration,error) { for decl := <-each; decl != nil; decl = <-each { result = append(result, *decl) } - return result,nil + return result, nil } func (e *DhcpConfiguration) Global() configDeclaration { @@ -874,83 +1005,86 @@ func (e *DhcpConfiguration) Global() configDeclaration { return result } -func (e *DhcpConfiguration) SubnetByAddress(address net.IP) (configDeclaration,error) { +func (e *DhcpConfiguration) SubnetByAddress(address net.IP) (configDeclaration, error) { var result []configDeclaration - for _,entry := range *e { + for _, entry := range *e { switch entry.id[0].(type) { - case pDeclarationSubnet4: - id := entry.id[0].(pDeclarationSubnet4) - if id.Contains(address) { - result = append(result, entry) - } - case pDeclarationSubnet6: - id := entry.id[0].(pDeclarationSubnet6) - if 
id.Contains(address) { - result = append(result, entry) - } + case pDeclarationSubnet4: + id := entry.id[0].(pDeclarationSubnet4) + if id.Contains(address) { + result = append(result, entry) + } + case pDeclarationSubnet6: + id := entry.id[0].(pDeclarationSubnet6) + if id.Contains(address) { + result = append(result, entry) + } } } if len(result) == 0 { - return configDeclaration{},fmt.Errorf("No network declarations containing %s found", address.String()) + return configDeclaration{}, fmt.Errorf("No network declarations containing %s found", address.String()) } if len(result) > 1 { - return configDeclaration{},fmt.Errorf("More than 1 network declaration found : %v", result) + return configDeclaration{}, fmt.Errorf("More than 1 network declaration found : %v", result) } - return result[0],nil + return result[0], nil } -func (e *DhcpConfiguration) HostByName(host string) (configDeclaration,error) { +func (e *DhcpConfiguration) HostByName(host string) (configDeclaration, error) { var result []configDeclaration - for _,entry := range *e { + for _, entry := range *e { switch entry.id[0].(type) { - case pDeclarationHost: - id := entry.id[0].(pDeclarationHost) - if strings.ToLower(id.name) == strings.ToLower(host) { - result = append(result, entry) - } + case pDeclarationHost: + id := entry.id[0].(pDeclarationHost) + if strings.ToLower(id.name) == strings.ToLower(host) { + result = append(result, entry) + } } } if len(result) == 0 { - return configDeclaration{},fmt.Errorf("No host declarations containing %s found", host) + return configDeclaration{}, fmt.Errorf("No host declarations containing %s found", host) } if len(result) > 1 { - return configDeclaration{},fmt.Errorf("More than 1 host declaration found : %v", result) + return configDeclaration{}, fmt.Errorf("More than 1 host declaration found : %v", result) } - return result[0],nil + return result[0], nil } /*** Network Map */ type NetworkMap []map[string]string -func ReadNetworkMap(fd *os.File) (NetworkMap,error) { 
- fromfile,eof := consumeFile(fd) - uncommented := uncomment(eof,fromfile) +func ReadNetworkMap(fd *os.File) (NetworkMap, error) { + + fromfile, eof := consumeFile(fd) + uncommented := uncomment(eof, fromfile) tokenized := tokenizeNetworkMapConfig(eof, uncommented) - result,err := parseNetworkMapConfig(eof, tokenized) - if err != nil { return nil,err } - return result,nil + result, err := parseNetworkMapConfig(eof, tokenized) + if err != nil { + return nil, err + } + return result, nil } -func (e *NetworkMap) NameIntoDevice(name string) (string,error) { - for _,val := range *e { +func (e *NetworkMap) NameIntoDevice(name string) (string, error) { + for _, val := range *e { if strings.ToLower(val["name"]) == strings.ToLower(name) { - return val["device"],nil + return val["device"], nil } } - return "",fmt.Errorf("Network name not found : %v", name) + return "", fmt.Errorf("Network name not found : %v", name) } -func (e *NetworkMap) DeviceIntoName(device string) (string,error) { - for _,val := range *e { +func (e *NetworkMap) DeviceIntoName(device string) (string, error) { + for _, val := range *e { if strings.ToLower(val["device"]) == strings.ToLower(device) { - return val["name"],nil + return val["name"], nil } } - return "",fmt.Errorf("Device name not found : %v", device) + return "", fmt.Errorf("Device name not found : %v", device) } func (e *NetworkMap) repr() string { var result []string - for idx,val := range *e { + for idx, val := range *e { result = append(result, fmt.Sprintf("network%d.name = \"%s\"", idx, val["name"])) result = append(result, fmt.Sprintf("network%d.device = \"%s\"", idx, val["device"])) } @@ -958,17 +1092,19 @@ func (e *NetworkMap) repr() string { } /** main */ -func consumeFile(fd *os.File) (chan byte,sentinelSignaller) { +func consumeFile(fd *os.File) (chan byte, sentinelSignaller) { fromfile := make(chan byte) eof := make(sentinelSignaller) go func() { b := make([]byte, 1) for { - _,err := fd.Read(b) - if err == io.EOF { break } + _, err 
:= fd.Read(b) + if err == io.EOF { + break + } fromfile <- b[0] } close(eof) }() - return fromfile,eof + return fromfile, eof } diff --git a/builder/vmware/common/driver_player_unix.go b/builder/vmware/common/driver_player_unix.go index 01b877d92..068720a6c 100644 --- a/builder/vmware/common/driver_player_unix.go +++ b/builder/vmware/common/driver_player_unix.go @@ -8,9 +8,9 @@ import ( "fmt" "log" "os/exec" + "path/filepath" "regexp" "runtime" - "path/filepath" ) func playerFindVdiskManager() (string, error) { diff --git a/builder/vmware/common/driver_workstation10.go b/builder/vmware/common/driver_workstation10.go index 716717e39..7e6ac8b8f 100644 --- a/builder/vmware/common/driver_workstation10.go +++ b/builder/vmware/common/driver_workstation10.go @@ -33,4 +33,3 @@ func (d *Workstation10Driver) Verify() error { return workstationVerifyVersion(VMWARE_WS_VERSION) } - diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 2cce8fd3c..2dcc082ac 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -39,38 +39,35 @@ type Config struct { vmwcommon.VMXConfig `mapstructure:",squash"` // disk drives - AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` - DiskName string `mapstructure:"vmdk_name"` - DiskSize uint `mapstructure:"disk_size"` - DiskTypeId string `mapstructure:"disk_type_id"` - Format string `mapstructure:"format"` + AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskName string `mapstructure:"vmdk_name"` + DiskSize uint `mapstructure:"disk_size"` + DiskTypeId string `mapstructure:"disk_type_id"` + Format string `mapstructure:"format"` // platform information - GuestOSType string `mapstructure:"guest_os_type"` - Version string `mapstructure:"version"` - VMName string `mapstructure:"vm_name"` + GuestOSType string `mapstructure:"guest_os_type"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` // Network type - Network string 
`mapstructure:"network"` + Network string `mapstructure:"network"` // device presence - Sound bool `mapstructure:"sound"` - USB bool `mapstructure:"usb"` + Sound bool `mapstructure:"sound"` + USB bool `mapstructure:"usb"` // communication ports - Serial string `mapstructure:"serial"` - Parallel string `mapstructure:"parallel"` + Serial string `mapstructure:"serial"` + Parallel string `mapstructure:"parallel"` // booting a guest - BootCommand []string `mapstructure:"boot_command"` KeepRegistered bool `mapstructure:"keep_registered"` OVFToolOptions []string `mapstructure:"ovftool_options"` SkipCompaction bool `mapstructure:"skip_compaction"` SkipExport bool `mapstructure:"skip_export"` - VMName string `mapstructure:"vm_name"` VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` VMXTemplatePath string `mapstructure:"vmx_template_path"` - Version string `mapstructure:"version"` // remote vsphere RemoteType string `mapstructure:"remote_type"` diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index d37f00969..0261123a6 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "strings" vmwcommon "github.com/hashicorp/packer/builder/vmware/common" @@ -21,24 +22,24 @@ type vmxTemplateData struct { ISOPath string Version string - Network_Type string - Network_Device string + Network_Type string + Network_Device string Sound_Present string - Usb_Present string + Usb_Present string - Serial_Present string - Serial_Type string + Serial_Present string + Serial_Type string Serial_Endpoint string - Serial_Host string - Serial_Yield string + Serial_Host string + Serial_Yield string Serial_Filename string - Serial_Auto string + Serial_Auto string - Parallel_Present string + Parallel_Present string Parallel_Bidirectional string - Parallel_Filename string - Parallel_Auto string + Parallel_Filename string + Parallel_Auto 
string } type additionalDiskTemplateData struct { @@ -63,34 +64,34 @@ type stepCreateVMX struct { type serialConfigPipe struct { filename string endpoint string - host string - yield string + host string + yield string } type serialConfigFile struct { filename string - yield string + yield string } type serialConfigDevice struct { devicename string - yield string + yield string } type serialConfigAuto struct { devicename string - yield string + yield string } type serialUnion struct { serialType interface{} - pipe *serialConfigPipe - file *serialConfigFile - device *serialConfigDevice - auto *serialConfigAuto + pipe *serialConfigPipe + file *serialConfigFile + device *serialConfigDevice + auto *serialConfigAuto } -func unformat_serial(config string) (*serialUnion,error) { +func unformat_serial(config string) (*serialUnion, error) { var defaultSerialPort string if runtime.GOOS == "windows" { defaultSerialPort = "COM1" @@ -100,7 +101,7 @@ func unformat_serial(config string) (*serialUnion,error) { input := strings.SplitN(config, ":", 2) if len(input) < 1 { - return nil,fmt.Errorf("Unexpected format for serial port: %s", config) + return nil, fmt.Errorf("Unexpected format for serial port: %s", config) } var formatType, formatOptions string @@ -112,116 +113,116 @@ func unformat_serial(config string) (*serialUnion,error) { } switch strings.ToUpper(formatType) { - case "PIPE": - comp := strings.Split(formatOptions, ",") - if len(comp) < 3 || len(comp) > 4 { - return nil,fmt.Errorf("Unexpected format for serial port : pipe : %s", config) - } - if res := strings.ToLower(comp[1]); res != "client" && res != "server" { - return nil,fmt.Errorf("Unexpected format for serial port : pipe : endpoint : %s : %s", res, config) - } - if res := strings.ToLower(comp[2]); res != "app" && res != "vm" { - return nil,fmt.Errorf("Unexpected format for serial port : pipe : host : %s : %s", res, config) - } - res := &serialConfigPipe{ - filename : comp[0], - endpoint : comp[1], - host : 
map[string]string{"app":"TRUE","vm":"FALSE"}[strings.ToLower(comp[2])], - yield : "FALSE", - } - if len(comp) == 4 { - res.yield = strings.ToUpper(comp[3]) - } - if res.yield != "TRUE" && res.yield != "FALSE" { - return nil,fmt.Errorf("Unexpected format for serial port : pipe : yield : %s : %s", res.yield, config) - } - return &serialUnion{serialType:res, pipe:res},nil + case "PIPE": + comp := strings.Split(formatOptions, ",") + if len(comp) < 3 || len(comp) > 4 { + return nil, fmt.Errorf("Unexpected format for serial port : pipe : %s", config) + } + if res := strings.ToLower(comp[1]); res != "client" && res != "server" { + return nil, fmt.Errorf("Unexpected format for serial port : pipe : endpoint : %s : %s", res, config) + } + if res := strings.ToLower(comp[2]); res != "app" && res != "vm" { + return nil, fmt.Errorf("Unexpected format for serial port : pipe : host : %s : %s", res, config) + } + res := &serialConfigPipe{ + filename: comp[0], + endpoint: comp[1], + host: map[string]string{"app": "TRUE", "vm": "FALSE"}[strings.ToLower(comp[2])], + yield: "FALSE", + } + if len(comp) == 4 { + res.yield = strings.ToUpper(comp[3]) + } + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil, fmt.Errorf("Unexpected format for serial port : pipe : yield : %s : %s", res.yield, config) + } + return &serialUnion{serialType: res, pipe: res}, nil - case "FILE": - comp := strings.Split(formatOptions, ",") - if len(comp) > 2 { - return nil,fmt.Errorf("Unexpected format for serial port : file : %s", config) - } + case "FILE": + comp := strings.Split(formatOptions, ",") + if len(comp) > 2 { + return nil, fmt.Errorf("Unexpected format for serial port : file : %s", config) + } - res := &serialConfigFile{ yield : "FALSE" } + res := &serialConfigFile{yield: "FALSE"} - res.filename = filepath.FromSlash(comp[0]) + res.filename = filepath.FromSlash(comp[0]) - res.yield = map[bool]string{true:strings.ToUpper(comp[0]), false:"FALSE"}[len(comp) > 1] - if res.yield != "TRUE" && 
res.yield != "FALSE" { - return nil,fmt.Errorf("Unexpected format for serial port : file : yield : %s : %s", res.yield, config) - } + res.yield = map[bool]string{true: strings.ToUpper(comp[0]), false: "FALSE"}[len(comp) > 1] + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil, fmt.Errorf("Unexpected format for serial port : file : yield : %s : %s", res.yield, config) + } - return &serialUnion{serialType:res, file:res},nil + return &serialUnion{serialType: res, file: res}, nil - case "DEVICE": - comp := strings.Split(formatOptions, ",") - if len(comp) > 2 { - return nil,fmt.Errorf("Unexpected format for serial port : device : %s", config) - } + case "DEVICE": + comp := strings.Split(formatOptions, ",") + if len(comp) > 2 { + return nil, fmt.Errorf("Unexpected format for serial port : device : %s", config) + } - res := new(serialConfigDevice) + res := new(serialConfigDevice) - if len(comp) == 2 { - res.devicename = map[bool]string{true:filepath.FromSlash(comp[0]), false:defaultSerialPort}[len(comp[0]) > 0] - res.yield = strings.ToUpper(comp[1]) - } else if len(comp) == 1 { - res.devicename = map[bool]string{true:filepath.FromSlash(comp[0]), false:defaultSerialPort}[len(comp[0]) > 0] - res.yield = "FALSE" - } else if len(comp) == 0 { - res.devicename = defaultSerialPort - res.yield = "FALSE" - } - - if res.yield != "TRUE" && res.yield != "FALSE" { - return nil,fmt.Errorf("Unexpected format for serial port : device : yield : %s : %s", res.yield, config) - } - - return &serialUnion{serialType:res, device:res},nil - - case "AUTO": - res := new(serialConfigAuto) + if len(comp) == 2 { + res.devicename = map[bool]string{true: filepath.FromSlash(comp[0]), false: defaultSerialPort}[len(comp[0]) > 0] + res.yield = strings.ToUpper(comp[1]) + } else if len(comp) == 1 { + res.devicename = map[bool]string{true: filepath.FromSlash(comp[0]), false: defaultSerialPort}[len(comp[0]) > 0] + res.yield = "FALSE" + } else if len(comp) == 0 { res.devicename = defaultSerialPort + 
res.yield = "FALSE" + } - if len(formatOptions) > 0 { - res.yield = strings.ToUpper(formatOptions) - } else { - res.yield = "FALSE" - } + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil, fmt.Errorf("Unexpected format for serial port : device : yield : %s : %s", res.yield, config) + } - if res.yield != "TRUE" && res.yield != "FALSE" { - return nil,fmt.Errorf("Unexpected format for serial port : auto : yield : %s : %s", res.yield, config) - } + return &serialUnion{serialType: res, device: res}, nil - return &serialUnion{serialType:res, auto:res},nil + case "AUTO": + res := new(serialConfigAuto) + res.devicename = defaultSerialPort - default: - return nil,fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(formatType), config) + if len(formatOptions) > 0 { + res.yield = strings.ToUpper(formatOptions) + } else { + res.yield = "FALSE" + } + + if res.yield != "TRUE" && res.yield != "FALSE" { + return nil, fmt.Errorf("Unexpected format for serial port : auto : yield : %s : %s", res.yield, config) + } + + return &serialUnion{serialType: res, auto: res}, nil + + default: + return nil, fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(formatType), config) } } /* parallel port */ type parallelUnion struct { parallelType interface{} - file *parallelPortFile - device *parallelPortDevice - auto *parallelPortAuto + file *parallelPortFile + device *parallelPortDevice + auto *parallelPortAuto } type parallelPortFile struct { filename string } type parallelPortDevice struct { bidirectional string - devicename string + devicename string } type parallelPortAuto struct { bidirectional string } -func unformat_parallel(config string) (*parallelUnion,error) { +func unformat_parallel(config string) (*parallelUnion, error) { input := strings.SplitN(config, ":", 2) if len(input) < 1 { - return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) + return nil, fmt.Errorf("Unexpected format for parallel port: %s", config) } var formatType, 
formatOptions string @@ -233,39 +234,44 @@ func unformat_parallel(config string) (*parallelUnion,error) { } switch strings.ToUpper(formatType) { - case "FILE": - res := ¶llelPortFile{ filename: filepath.FromSlash(formatOptions) } - return ¶llelUnion{ parallelType:res, file: res},nil - case "DEVICE": - comp := strings.Split(formatOptions, ",") - if len(comp) < 1 || len(comp) > 2 { - return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) + case "FILE": + res := ¶llelPortFile{filename: filepath.FromSlash(formatOptions)} + return ¶llelUnion{parallelType: res, file: res}, nil + case "DEVICE": + comp := strings.Split(formatOptions, ",") + if len(comp) < 1 || len(comp) > 2 { + return nil, fmt.Errorf("Unexpected format for parallel port: %s", config) + } + res := new(parallelPortDevice) + res.bidirectional = "FALSE" + res.devicename = filepath.FromSlash(comp[0]) + if len(comp) > 1 { + switch strings.ToUpper(comp[1]) { + case "BI": + res.bidirectional = "TRUE" + case "UNI": + res.bidirectional = "FALSE" + default: + return nil, fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(comp[0]), config) } - res := new(parallelPortDevice) - res.bidirectional = "FALSE" - res.devicename = strings.ToUpper(comp[0]) - if len(comp) > 1 { - switch strings.ToUpper(comp[1]) { - case "BI": res.bidirectional = "TRUE" - case "UNI": res.bidirectional = "FALSE" - default: - return nil,fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(comp[0]), config) - } - } - return ¶llelUnion{ parallelType:res, device:res },nil + } + return ¶llelUnion{parallelType: res, device: res}, nil - case "AUTO": - res := new(parallelPortAuto) - switch strings.ToUpper(formatOptions) { - case "": fallthrough - case "UNI": res.bidirectional = "FALSE" - case "BI": res.bidirectional = "TRUE" - default: - return nil,fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(formatOptions), config) - } - return ¶llelUnion{ parallelType:res, auto:res 
},nil + case "AUTO": + res := new(parallelPortAuto) + switch strings.ToUpper(formatOptions) { + case "": + fallthrough + case "UNI": + res.bidirectional = "FALSE" + case "BI": + res.bidirectional = "TRUE" + default: + return nil, fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(formatOptions), config) + } + return ¶llelUnion{parallelType: res, auto: res}, nil } - return nil,fmt.Errorf("Unexpected format for parallel port: %s", config) + return nil, fmt.Errorf("Unexpected format for parallel port: %s", config) } /* regular steps */ @@ -348,11 +354,11 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist Version: config.Version, ISOPath: isoPath, - Sound_Present: map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.Sound)], - Usb_Present: map[bool]string{true:"TRUE",false:"FALSE"}[bool(config.USB)], + Sound_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.Sound)], + Usb_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.USB)], - Serial_Present: "FALSE", - Parallel_Present: "FALSE", + Serial_Present: "FALSE", + Parallel_Present: "FALSE", } /// Check the network type that the user specified @@ -367,7 +373,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist ui.Error(err.Error()) return multistep.ActionHalt } - netmap,res := vmwcommon.ReadNetmapConfig(pathNetmap) + netmap, res := vmwcommon.ReadNetmapConfig(pathNetmap) if res != nil { err := fmt.Errorf("Unable to read netmap conf file: %s: %v", pathNetmap, res) state.Put("error", err) @@ -376,13 +382,13 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist } // try and convert the specified network to a device - device,err := netmap.NameIntoDevice(network) + device, err := netmap.NameIntoDevice(network) // success. 
so we know that it's an actual network type inside netmap.conf if err == nil { templateData.Network_Type = network templateData.Network_Device = device - // we were unable to find the type, so assume it's a custom network device. + // we were unable to find the type, so assume it's a custom network device. } else { templateData.Network_Type = "custom" templateData.Network_Device = network @@ -392,7 +398,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist /// check if serial port has been configured if config.Serial != "" { - serial,err := unformat_serial(config.Serial) + serial, err := unformat_serial(config.Serial) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) state.Put("error", err) @@ -408,35 +414,35 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Serial_Auto = "FALSE" switch serial.serialType.(type) { - case *serialConfigPipe: - templateData.Serial_Type = "pipe" - templateData.Serial_Endpoint = serial.pipe.endpoint - templateData.Serial_Host = serial.pipe.host - templateData.Serial_Yield = serial.pipe.yield - templateData.Serial_Filename = filepath.FromSlash(serial.pipe.filename) - case *serialConfigFile: - templateData.Serial_Type = "file" - templateData.Serial_Filename = filepath.FromSlash(serial.file.filename) - case *serialConfigDevice: - templateData.Serial_Type = "device" - templateData.Serial_Filename = filepath.FromSlash(serial.device.devicename) - case *serialConfigAuto: - templateData.Serial_Type = "device" - templateData.Serial_Filename = filepath.FromSlash(serial.auto.devicename) - templateData.Serial_Yield = serial.auto.yield - templateData.Serial_Auto = "TRUE" + case *serialConfigPipe: + templateData.Serial_Type = "pipe" + templateData.Serial_Endpoint = serial.pipe.endpoint + templateData.Serial_Host = serial.pipe.host + templateData.Serial_Yield = serial.pipe.yield + templateData.Serial_Filename = 
filepath.FromSlash(serial.pipe.filename) + case *serialConfigFile: + templateData.Serial_Type = "file" + templateData.Serial_Filename = filepath.FromSlash(serial.file.filename) + case *serialConfigDevice: + templateData.Serial_Type = "device" + templateData.Serial_Filename = filepath.FromSlash(serial.device.devicename) + case *serialConfigAuto: + templateData.Serial_Type = "device" + templateData.Serial_Filename = filepath.FromSlash(serial.auto.devicename) + templateData.Serial_Yield = serial.auto.yield + templateData.Serial_Auto = "TRUE" - default: - err := fmt.Errorf("Error procesing VMX template: %v", serial) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt + default: + err := fmt.Errorf("Error procesing VMX template: %v", serial) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt } } /// check if parallel port has been configured if config.Parallel != "" { - parallel,err := unformat_parallel(config.Parallel) + parallel, err := unformat_parallel(config.Parallel) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) state.Put("error", err) @@ -446,22 +452,22 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Parallel_Auto = "FALSE" switch parallel.parallelType.(type) { - case *parallelPortFile: - templateData.Parallel_Present = "TRUE" - templateData.Parallel_Filename = filepath.FromSlash(parallel.file.filename) - case *parallelPortDevice: - templateData.Parallel_Present = "TRUE" - templateData.Parallel_Bidirectional = parallel.device.bidirectional - templateData.Parallel_Filename = filepath.FromSlash(parallel.device.devicename) - case *parallelPortAuto: - templateData.Parallel_Present = "TRUE" - templateData.Parallel_Auto = "TRUE" - templateData.Parallel_Bidirectional = parallel.auto.bidirectional - default: - err := fmt.Errorf("Error procesing VMX template: %v", parallel) - state.Put("error", err) - ui.Error(err.Error()) - return 
multistep.ActionHalt + case *parallelPortFile: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Filename = filepath.FromSlash(parallel.file.filename) + case *parallelPortDevice: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Bidirectional = parallel.device.bidirectional + templateData.Parallel_Filename = filepath.FromSlash(parallel.device.devicename) + case *parallelPortAuto: + templateData.Parallel_Present = "TRUE" + templateData.Parallel_Auto = "TRUE" + templateData.Parallel_Bidirectional = parallel.auto.bidirectional + default: + err := fmt.Errorf("Error procesing VMX template: %v", parallel) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt } } diff --git a/builder/vmware/iso/step_create_vmx_test.go b/builder/vmware/iso/step_create_vmx_test.go index e540eee8a..7f5c1d91c 100644 --- a/builder/vmware/iso/step_create_vmx_test.go +++ b/builder/vmware/iso/step_create_vmx_test.go @@ -1,37 +1,37 @@ package iso import ( + "bytes" "fmt" - "os" - "path/filepath" - "strings" + "io/ioutil" "math" "math/rand" - "strconv" - "io/ioutil" - "bytes" + "os" + "path/filepath" "runtime" + "strconv" + "strings" - "testing" "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/template" "github.com/mitchellh/packer/provisioner/shell" + "github.com/mitchellh/packer/template" + "testing" ) var vmxTestBuilderConfig = map[string]string{ - "type": `"vmware-iso"`, - "iso_url": `"https://archive.org/download/ut-ttylinux-i686-12.6/ut-ttylinux-i686-12.6.iso"`, + "type": `"vmware-iso"`, + "iso_url": `"https://archive.org/download/ut-ttylinux-i686-12.6/ut-ttylinux-i686-12.6.iso"`, "iso_checksum_type": `"md5"`, - "iso_checksum": `"43c1feeae55a44c6ef694b8eb18408a6"`, - "ssh_username": `"root"`, - "ssh_password": `"password"`, - "ssh_wait_timeout": `"45s"`, - "boot_command": `["","rootpassword","udhcpc"]`, - "shutdown_command": `"/sbin/shutdown -h; exit 0"`, + "iso_checksum": `"43c1feeae55a44c6ef694b8eb18408a6"`, + 
"ssh_username": `"root"`, + "ssh_password": `"password"`, + "ssh_wait_timeout": `"45s"`, + "boot_command": `["","rootpassword","udhcpc"]`, + "shutdown_command": `"/sbin/shutdown -h; exit 0"`, } var vmxTestProvisionerConfig = map[string]string{ - "type": `"shell"`, + "type": `"shell"`, "inline": `["echo hola mundo"]`, } @@ -40,24 +40,26 @@ const vmxTestTemplate string = `{"builders":[{%s}],"provisioners":[{%s}]}` func tmpnam(prefix string) string { var path string var err error - + const length = 16 - + dir := os.TempDir() max := int(math.Pow(2, float64(length))) - n,err := rand.Intn(max),nil - for path = filepath.Join(dir, prefix + strconv.Itoa(n)); err == nil; _,err = os.Stat(path) { + n, err := rand.Intn(max), nil + for path = filepath.Join(dir, prefix+strconv.Itoa(n)); err == nil; _, err = os.Stat(path) { n = rand.Intn(max) - path = filepath.Join(dir, prefix + strconv.Itoa(n)) + path = filepath.Join(dir, prefix+strconv.Itoa(n)) } return path } -func createFloppyOutput(prefix string) (string,string,error) { +func createFloppyOutput(prefix string) (string, string, error) { output := tmpnam(prefix) - f,err := os.Create(output) - if err != nil { return "","",fmt.Errorf("Unable to create empty %s: %s", output, err) } + f, err := os.Create(output) + if err != nil { + return "", "", fmt.Errorf("Unable to create empty %s: %s", output, err) + } f.Close() vmxData := []string{ @@ -69,18 +71,24 @@ func createFloppyOutput(prefix string) (string,string,error) { } outputFile := strings.Replace(output, "\\", "\\\\", -1) - vmxString := fmt.Sprintf("{" + strings.Join(vmxData, ",") + "}", outputFile) - return output,vmxString,nil + vmxString := fmt.Sprintf("{"+strings.Join(vmxData, ",")+"}", outputFile) + return output, vmxString, nil } -func readFloppyOutput(path string) (string,error) { - f,err := os.Open(path) - if err != nil { return "",fmt.Errorf("Unable to open file %s", path) } +func readFloppyOutput(path string) (string, error) { + f, err := os.Open(path) + if err != nil { 
+ return "", fmt.Errorf("Unable to open file %s", path) + } defer f.Close() - data,err := ioutil.ReadAll(f) - if err != nil { return "",fmt.Errorf("Unable to read file: %s", err) } - if len(data) == 0 { return "", nil } - return string(data[:bytes.IndexByte(data,0)]),nil + data, err := ioutil.ReadAll(f) + if err != nil { + return "", fmt.Errorf("Unable to read file: %s", err) + } + if len(data) == 0 { + return "", nil + } + return string(data[:bytes.IndexByte(data, 0)]), nil } func setupVMwareBuild(t *testing.T, builderConfig map[string]string, provisionerConfig map[string]string) error { @@ -88,72 +96,84 @@ func setupVMwareBuild(t *testing.T, builderConfig map[string]string, provisioner // create builder config and update with user-supplied options cfgBuilder := map[string]string{} - for k,v := range vmxTestBuilderConfig { cfgBuilder[k] = v } - for k,v := range builderConfig { cfgBuilder[k] = v } + for k, v := range vmxTestBuilderConfig { + cfgBuilder[k] = v + } + for k, v := range builderConfig { + cfgBuilder[k] = v + } // convert our builder config into a single sprintfable string builderLines := []string{} - for k,v := range cfgBuilder { + for k, v := range cfgBuilder { builderLines = append(builderLines, fmt.Sprintf(`"%s":%s`, k, v)) } // create provisioner config and update with user-supplied options cfgProvisioner := map[string]string{} - for k,v := range vmxTestProvisionerConfig { cfgProvisioner[k] = v } - for k,v := range provisionerConfig { cfgProvisioner[k] = v } + for k, v := range vmxTestProvisionerConfig { + cfgProvisioner[k] = v + } + for k, v := range provisionerConfig { + cfgProvisioner[k] = v + } // convert our provisioner config into a single sprintfable string provisionerLines := []string{} - for k,v := range cfgProvisioner { + for k, v := range cfgProvisioner { provisionerLines = append(provisionerLines, fmt.Sprintf(`"%s":%s`, k, v)) } // and now parse them into a template - configString := fmt.Sprintf(vmxTestTemplate, 
strings.Join(builderLines,`,`), strings.Join(provisionerLines,`,`)) + configString := fmt.Sprintf(vmxTestTemplate, strings.Join(builderLines, `,`), strings.Join(provisionerLines, `,`)) - tpl,err := template.Parse(strings.NewReader(configString)) + tpl, err := template.Parse(strings.NewReader(configString)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } // create our config to test the vmware-iso builder components := packer.ComponentFinder{ - Builder: func(n string) (packer.Builder,error) { - return &Builder{},nil + Builder: func(n string) (packer.Builder, error) { + return &Builder{}, nil }, - Hook: func(n string) (packer.Hook,error) { - return &packer.DispatchHook{},nil + Hook: func(n string) (packer.Hook, error) { + return &packer.DispatchHook{}, nil }, - PostProcessor: func(n string) (packer.PostProcessor,error) { - return &packer.MockPostProcessor{},nil + PostProcessor: func(n string) (packer.PostProcessor, error) { + return &packer.MockPostProcessor{}, nil }, - Provisioner: func(n string) (packer.Provisioner,error) { - return &shell.Provisioner{},nil + Provisioner: func(n string) (packer.Provisioner, error) { + return &shell.Provisioner{}, nil }, } config := packer.CoreConfig{ - Template: tpl, + Template: tpl, Components: components, } // create a core using our template - core,err := packer.NewCore(&config) - if err != nil { t.Fatalf("Unable to create core: %s", err) } + core, err := packer.NewCore(&config) + if err != nil { + t.Fatalf("Unable to create core: %s", err) + } // now we can prepare our build - b,err := core.Build("vmware-iso") - if err != nil { t.Fatalf("Unable to create build: %s", err) } + b, err := core.Build("vmware-iso") + if err != nil { + t.Fatalf("Unable to create build: %s", err) + } - warn,err := b.Prepare() + warn, err := b.Prepare() if len(warn) > 0 { - for _,w := range warn { + for _, w := range warn { t.Logf("Configuration warning: %s", w) } } // and then finally build it cache := &packer.FileCache{CacheDir: 
os.TempDir()} - artifacts,err := b.Run(ui, cache) + artifacts, err := b.Run(ui, cache) if err != nil { t.Fatalf("Failed to build artifact: %s", err) } @@ -166,7 +186,7 @@ func setupVMwareBuild(t *testing.T, builderConfig map[string]string, provisioner // otherwise some number of errors happened t.Logf("Unexpected number of artifacts returned: %d", len(artifacts)) errors := make([]error, 0) - for _,artifact := range artifacts { + for _, artifact := range artifacts { if err := artifact.Destroy(); err != nil { errors = append(errors, err) } @@ -186,9 +206,11 @@ func TestStepCreateVmx_SerialFile(t *testing.T) { } error := setupVMwareBuild(t, serialConfig, map[string]string{}) - if error != nil { t.Errorf("Unable to read file: %s", error) } + if error != nil { + t.Errorf("Unable to read file: %s", error) + } - f,err := os.Stat(tmpfile) + f, err := os.Stat(tmpfile) if err != nil { t.Errorf("VMware builder did not create a file for serial port: %s", err) } @@ -216,19 +238,29 @@ func TestStepCreateVmx_SerialPort(t *testing.T) { } // where to write output - output,vmxData,err := createFloppyOutput("SerialPortOutput.") - if err != nil { t.Fatalf("Error creating output: %s", err) } - defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + output, vmxData, err := createFloppyOutput("SerialPortOutput.") + if err != nil { + t.Fatalf("Error creating output: %s", err) + } + defer func() { + if _, err := os.Stat(output); err == nil { + os.Remove(output) + } + }() config["vmx_data"] = vmxData t.Logf("Preparing to write output to %s", output) // whee err = setupVMwareBuild(t, config, provision) - if err != nil { t.Errorf("%s", err) } + if err != nil { + t.Errorf("%s", err) + } // check the output - data,err := readFloppyOutput(output) - if err != nil { t.Errorf("%s", err) } + data, err := readFloppyOutput(output) + if err != nil { + t.Errorf("%s", err) + } if data != "serial8250: ttyS1 at\n" { t.Errorf("Serial port not detected : %v", data) @@ -251,19 
+283,29 @@ func TestStepCreateVmx_ParallelPort(t *testing.T) { } // where to write output - output,vmxData,err := createFloppyOutput("ParallelPortOutput.") - if err != nil { t.Fatalf("Error creating output: %s", err) } - defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + output, vmxData, err := createFloppyOutput("ParallelPortOutput.") + if err != nil { + t.Fatalf("Error creating output: %s", err) + } + defer func() { + if _, err := os.Stat(output); err == nil { + os.Remove(output) + } + }() config["vmx_data"] = vmxData t.Logf("Preparing to write output to %s", output) // whee error := setupVMwareBuild(t, config, provision) - if error != nil { t.Errorf("%s", error) } + if error != nil { + t.Errorf("%s", error) + } // check the output - data,err := readFloppyOutput(output) - if err != nil { t.Errorf("%s", err) } + data, err := readFloppyOutput(output) + if err != nil { + t.Errorf("%s", err) + } if data != "parport \n" { t.Errorf("Parallel port not detected : %v", data) @@ -279,19 +321,29 @@ func TestStepCreateVmx_Usb(t *testing.T) { } // where to write output - output,vmxData,err := createFloppyOutput("UsbOutput.") - if err != nil { t.Fatalf("Error creating output: %s", err) } - defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + output, vmxData, err := createFloppyOutput("UsbOutput.") + if err != nil { + t.Fatalf("Error creating output: %s", err) + } + defer func() { + if _, err := os.Stat(output); err == nil { + os.Remove(output) + } + }() config["vmx_data"] = vmxData t.Logf("Preparing to write output to %s", output) // whee error := setupVMwareBuild(t, config, provision) - if error != nil { t.Errorf("%s", error) } + if error != nil { + t.Errorf("%s", error) + } // check the output - data,err := readFloppyOutput(output) - if err != nil { t.Errorf("%s", err) } + data, err := readFloppyOutput(output) + if err != nil { + t.Errorf("%s", err) + } if data != "USB hub found\n" { t.Errorf("USB support not 
detected : %v", data) @@ -302,24 +354,34 @@ func TestStepCreateVmx_Sound(t *testing.T) { config := map[string]string{ "sound": `"TRUE"`, } - provision := map[string]string { + provision := map[string]string{ "inline": `"cat /proc/modules | egrep -o '^soundcore' > /dev/fd0"`, } // where to write output - output,vmxData,err := createFloppyOutput("SoundOutput.") - if err != nil { t.Fatalf("Error creating output: %s", err) } - defer func() { if _,err := os.Stat(output); err == nil { os.Remove(output) } }() + output, vmxData, err := createFloppyOutput("SoundOutput.") + if err != nil { + t.Fatalf("Error creating output: %s", err) + } + defer func() { + if _, err := os.Stat(output); err == nil { + os.Remove(output) + } + }() config["vmx_data"] = vmxData t.Logf("Preparing to write output to %s", output) // whee error := setupVMwareBuild(t, config, provision) - if error != nil { t.Errorf("Unable to read file: %s", error) } + if error != nil { + t.Errorf("Unable to read file: %s", error) + } // check the output - data,err := readFloppyOutput(output) - if err != nil { t.Errorf("%s", err) } + data, err := readFloppyOutput(output) + if err != nil { + t.Errorf("%s", err) + } if data != "soundcore\n" { t.Errorf("Soundcard not detected : %v", data) From 898b27c16d7f0cb2d8cf0b63384c8b9b4e26a6f6 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 12 Apr 2017 17:41:36 -0500 Subject: [PATCH 164/251] Added support for the NONE option to be specified for parallel and serial ports in the vmware iso builder. 
--- builder/vmware/iso/step_create_vmx.go | 12 ++++++++++++ website/source/docs/builders/vmware-iso.html.md | 6 ++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 0261123a6..207d38caa 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -196,6 +196,9 @@ func unformat_serial(config string) (*serialUnion, error) { return &serialUnion{serialType: res, auto: res}, nil + case "NONE": + return &serialUnion{serialType: nil}, nil + default: return nil, fmt.Errorf("Unknown serial type : %s : %s", strings.ToUpper(formatType), config) } @@ -270,7 +273,11 @@ func unformat_parallel(config string) (*parallelUnion, error) { return nil, fmt.Errorf("Unknown parallel port direction : %s : %s", strings.ToUpper(formatOptions), config) } return ¶llelUnion{parallelType: res, auto: res}, nil + + case "NONE": + return ¶llelUnion{parallelType: nil}, nil } + return nil, fmt.Errorf("Unexpected format for parallel port: %s", config) } @@ -431,6 +438,8 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Serial_Filename = filepath.FromSlash(serial.auto.devicename) templateData.Serial_Yield = serial.auto.yield templateData.Serial_Auto = "TRUE" + case nil: + break default: err := fmt.Errorf("Error procesing VMX template: %v", serial) @@ -463,6 +472,9 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Parallel_Present = "TRUE" templateData.Parallel_Auto = "TRUE" templateData.Parallel_Bidirectional = parallel.auto.bidirectional + case nil: + break + default: err := fmt.Errorf("Error procesing VMX template: %v", parallel) state.Put("error", err) diff --git a/website/source/docs/builders/vmware-iso.html.md b/website/source/docs/builders/vmware-iso.html.md index 44285a850..95ab31bef 100644 --- a/website/source/docs/builders/vmware-iso.html.md +++ 
b/website/source/docs/builders/vmware-iso.html.md @@ -207,7 +207,7 @@ builder. - `parallel` (string) - This specifies a parallel port to add to the VM. It has the format of `Type:option1,option2,...`. Type can be one of the - following values: "FILE", "DEVICE", or "AUTO". + following values: "FILE", "DEVICE", "AUTO", or "NONE". * `FILE:path` - Specifies the path to the local file to be used for the parallel port. @@ -217,6 +217,7 @@ builder. parallel port. Direction can be `BI` to specify bidirectional communication or `UNI` to specify unidirectional communication. + * `NONE` - Specifies to not use a parallel port. (default) - `remote_cache_datastore` (string) - The path to the datastore where supporting files will be stored during the build on the remote machine. By @@ -254,7 +255,7 @@ builder. - `serial` (string) - This specifies a serial port to add to the VM. It has a format of `Type:option1,option2,...`. The field `Type` can be one - of the following values: `FILE`, `DEVICE`, `PIPE`, or `AUTO`. + of the following values: `FILE`, `DEVICE`, `PIPE`, `AUTO`, or `NONE`. * `FILE:path(,yield)` - Specifies the path to the local file to be used as the serial port. @@ -285,6 +286,7 @@ builder. * `yield` (bool) - This is an optional boolean that specifies whether the vm should yield the cpu when polling the port. By default, the builder will assume this as `FALSE`. + * `NONE` - Specifies to not use a serial port. (default) - `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty From 75fbfa0763bf94b2456d041a6649ce235770d870 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 13 Apr 2017 17:39:55 -0500 Subject: [PATCH 165/251] Replaced a hacky type assertion in the VMware builder with a call to Driver.GetVmwareDriver() that returns the driver-specific structure for ip and addressing information. Also implemented the addressing functions for the ESXi driver interface. 
This fixes an issue where a driver might not have defined a VmwareDriver by forcing a developer to implement it via the standard Driver interface. --- builder/vmware/common/driver.go | 1 + builder/vmware/common/driver_fusion5.go | 4 + builder/vmware/common/driver_fusion6.go | 4 + builder/vmware/common/driver_player5.go | 4 + builder/vmware/common/driver_player6.go | 4 + builder/vmware/common/driver_workstation10.go | 4 + builder/vmware/common/driver_workstation9.go | 4 + builder/vmware/iso/driver_esx5.go | 101 +++++++++++++++++- builder/vmware/iso/step_create_vmx.go | 2 +- 9 files changed, 126 insertions(+), 2 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 745b48d69..3e38442a2 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -64,6 +64,7 @@ type Driver interface { /// These methods are generally implemented by the VmwareDriver /// structure within this file. A driver implementation can /// reimplement these, though, if it wants. 
+ GetVmwareDriver() VmwareDriver // Get the guest hw address for the vm GuestAddress(multistep.StateBag) (string, error) diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index 4bc5ae301..e8c49155c 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -173,3 +173,7 @@ const fusionSuppressPlist = ` ` + +func (d *Fusion5Driver) GetVmwareDriver() VmwareDriver { + return d.VmwareDriver +} diff --git a/builder/vmware/common/driver_fusion6.go b/builder/vmware/common/driver_fusion6.go index af42c7ef9..2f0563eb5 100644 --- a/builder/vmware/common/driver_fusion6.go +++ b/builder/vmware/common/driver_fusion6.go @@ -68,3 +68,7 @@ func (d *Fusion6Driver) Verify() error { return compareVersions(matches[1], VMWARE_FUSION_VERSION, "Fusion Professional") } + +func (d *Fusion6Driver) GetVmwareDriver() VmwareDriver { + return d.Fusion5Driver.VmwareDriver +} diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 240b44a11..04fb4e897 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -209,3 +209,7 @@ func (d *Player5Driver) ToolsIsoPath(flavor string) string { func (d *Player5Driver) ToolsInstall() error { return nil } + +func (d *Player5Driver) GetVmwareDriver() VmwareDriver { + return d.VmwareDriver +} diff --git a/builder/vmware/common/driver_player6.go b/builder/vmware/common/driver_player6.go index d1cc7be88..1ccb84b85 100644 --- a/builder/vmware/common/driver_player6.go +++ b/builder/vmware/common/driver_player6.go @@ -35,3 +35,7 @@ func (d *Player6Driver) Verify() error { return playerVerifyVersion(VMWARE_PLAYER_VERSION) } + +func (d *Player6Driver) GetVmwareDriver() VmwareDriver { + return d.Player5Driver.VmwareDriver +} diff --git a/builder/vmware/common/driver_workstation10.go b/builder/vmware/common/driver_workstation10.go index 7e6ac8b8f..471fce37d 100644 --- 
a/builder/vmware/common/driver_workstation10.go +++ b/builder/vmware/common/driver_workstation10.go @@ -33,3 +33,7 @@ func (d *Workstation10Driver) Verify() error { return workstationVerifyVersion(VMWARE_WS_VERSION) } + +func (d *Workstation10Driver) GetVmwareDriver() VmwareDriver { + return d.Workstation9Driver.VmwareDriver +} diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index 8a6eb5556..af2d7df35 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -170,3 +170,7 @@ func (d *Workstation9Driver) ToolsIsoPath(flavor string) string { func (d *Workstation9Driver) ToolsInstall() error { return nil } + +func (d *Workstation9Driver) GetVmwareDriver() VmwareDriver { + return d.VmwareDriver +} diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 6df8271f6..d2f00ea81 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -26,7 +26,7 @@ import ( // ESX5 driver talks to an ESXi5 hypervisor remotely over SSH to build // virtual machines. This driver can only manage one machine at a time. 
type ESX5Driver struct { - vmwcommon.VmwareDriver + base vmwcommon.VmwareDriver Host string Port uint @@ -173,6 +173,101 @@ func (d *ESX5Driver) HostIP(multistep.StateBag) (string, error) { return host, err } +func (d *ESX5Driver) GuestIP(multistep.StateBag) (string, error) { + // GuestIP is defined by the user as d.Host..but let's validate it just to be sure + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", d.Host, d.Port)) + defer conn.Close() + if err != nil { + return "", err + } + + host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) + return host, err +} + +func (d *ESX5Driver) HostAddress(multistep.StateBag) (string, error) { + // make a connection + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", d.Host, d.Port)) + defer conn.Close() + if err != nil { + return "", err + } + + // get the local address (the host) + host, _, err := net.SplitHostPort(conn.LocalAddr().String()) + if err != nil { + return "", fmt.Errorf("Unable to determine host address for ESXi: %v", err) + } + + // iterate through all the interfaces.. + interfaces, err := net.Interfaces() + if err != nil { + return "", fmt.Errorf("Unable to enumerate host interfaces : %v", err) + } + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + // ..checking to see if any if it's addrs match the host address + for _, addr := range addrs { + if addr.String() == host { // FIXME: Is this the proper way to compare two HardwareAddrs? 
+ return intf.HardwareAddr.String(), nil + } + } + } + + // ..unfortunately nothing was found + return "", fmt.Errorf("Unable to locate interface matching host address in ESXi: %v", host) +} + +func (d *ESX5Driver) GuestAddress(multistep.StateBag) (string, error) { + // list all the interfaces on the esx host + r, err := d.esxcli("network", "ip", "interface", "list") + if err != nil { + return "", fmt.Errorf("Could not retrieve network interfaces for ESXi: %v", err) + } + + // rip out the interface name and the MAC address from the csv output + addrs := make(map[string]string) + for record, err := r.read(); record != nil && err == nil; record, err = r.read() { + if strings.ToUpper(record["Enabled"]) != "TRUE" { + continue + } + addrs[record["Name"]] = record["MAC Address"] + } + + // list all the addresses on the esx host + r, err = d.esxcli("network", "ip", "interface", "ipv4", "get") + if err != nil { + return "", fmt.Errorf("Could not retrieve network addresses for ESXi: %v", err) + } + + // figure out the interface name that matches the specified d.Host address + var intf string + intf = "" + for record, err := r.read(); record != nil && err == nil; record, err = r.read() { + if record["IPv4 Address"] == d.Host && record["Name"] != "" { + intf = record["Name"] + break + } + } + if intf == "" { + return "", fmt.Errorf("Unable to find matching address for ESXi guest") + } + + // find the MAC address according to the interface name + result, ok := addrs[intf] + if !ok { + return "", fmt.Errorf("Unable to find address for ESXi guest interface") + } + + // ..and we're good + return result, nil +} + func (d *ESX5Driver) VNCAddress(_ string, portMin, portMax uint) (string, uint, error) { var vncPort uint @@ -530,6 +625,10 @@ func (d *ESX5Driver) esxcli(args ...string) (*esxcliReader, error) { return &esxcliReader{r, header}, nil } +func (d *ESX5Driver) GetVmwareDriver() vmwcommon.VmwareDriver { + return d.base +} + type esxcliReader struct { cr *csv.Reader header 
[]string diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 207d38caa..bf6bb3d6c 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -370,7 +370,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist /// Check the network type that the user specified network := config.Network - driver := state.Get("driver").(vmwcommon.VmwareDriver) + driver := state.Get("driver").(vmwcommon.Driver).GetVmwareDriver() // read netmap config pathNetmap := driver.NetmapConfPath() From 6423525a33562ca891fef4c8ab89b833a9f70971 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 13 Apr 2017 19:10:40 -0500 Subject: [PATCH 166/251] Updated imports of github.com/mitchellh/packer to new naming scheme github.com/hashicorp/packer --- builder/vmware/iso/driver_esx5.go | 2 +- builder/vmware/iso/step_create_vmx_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index d2f00ea81..3f18656d0 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -14,12 +14,12 @@ import ( "strings" "time" + vmwcommon "github.com/hashicorp/packer/builder/vmware/common" commonssh "github.com/hashicorp/packer/common/ssh" "github.com/hashicorp/packer/communicator/ssh" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" - vmwcommon "github.com/mitchellh/packer/builder/vmware/common" gossh "golang.org/x/crypto/ssh" ) diff --git a/builder/vmware/iso/step_create_vmx_test.go b/builder/vmware/iso/step_create_vmx_test.go index 7f5c1d91c..31177957f 100644 --- a/builder/vmware/iso/step_create_vmx_test.go +++ b/builder/vmware/iso/step_create_vmx_test.go @@ -12,9 +12,9 @@ import ( "strconv" "strings" - "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/provisioner/shell" - 
"github.com/mitchellh/packer/template" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/provisioner/shell" + "github.com/hashicorp/packer/template" "testing" ) From 258804106bff27a6b9d8fd8efed727fb2ce289f4 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 13 Apr 2017 19:19:43 -0500 Subject: [PATCH 167/251] Added missing GetVmwareDriver() method to VMware Builder's DriverMock. --- builder/vmware/common/driver_mock.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 562eab382..8b533db88 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -206,3 +206,20 @@ func (d *DriverMock) Verify() error { d.VerifyCalled = true return d.VerifyErr } + +func (d *DriverMock) GetVmwareDriver() VmwareDriver { + var state VmwareDriver + state.DhcpLeasesPath = func(string) string { + return "/path/to/dhcp.leases" + } + state.DhcpConfPath = func(string) string { + return "/path/to/dhcp.conf" + } + state.VmnetnatConfPath = func(string) string { + return "/path/to/vmnetnat.conf" + } + state.NetmapConfPath = func() string { + return "/path/to/netmap.conf" + } + return state +} From 58ebc5c9a5cc932678f7b1624171a4704ea133fc Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Thu, 13 Apr 2017 20:01:10 -0500 Subject: [PATCH 168/251] When specifying NONE for serial or parallel in the VMware builder, disable the serial and parallel port devices entirely. 
--- builder/vmware/iso/step_create_vmx.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index bf6bb3d6c..137b0c736 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -404,7 +404,9 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist state.Put("vmnetwork", network) /// check if serial port has been configured - if config.Serial != "" { + if config.Serial == "" { + templateData.Serial_Present = "FALSE" + } else { serial, err := unformat_serial(config.Serial) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) @@ -439,6 +441,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Serial_Yield = serial.auto.yield templateData.Serial_Auto = "TRUE" case nil: + templateData.Serial_Present = "FALSE" break default: @@ -450,7 +453,9 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist } /// check if parallel port has been configured - if config.Parallel != "" { + if config.Parallel == "" { + templateData.Parallel_Present = "FALSE" + } else { parallel, err := unformat_parallel(config.Parallel) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) @@ -473,6 +478,7 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.Parallel_Auto = "TRUE" templateData.Parallel_Bidirectional = parallel.auto.bidirectional case nil: + templateData.Parallel_Present = "FALSE" break default: From 029c357d8c52e86d6733b6d342da3c55d8fecbc6 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sat, 17 Jun 2017 15:35:58 -0500 Subject: [PATCH 169/251] Modified some tests to require the PACKER_ACC environment variable to be set before executing them. This turns them into acceptance tests as per CONTRIBUTING.md. 
--- builder/vmware/common/step_shutdown_test.go | 4 ++++ builder/vmware/iso/step_create_vmx_test.go | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/builder/vmware/common/step_shutdown_test.go b/builder/vmware/common/step_shutdown_test.go index ed0eae486..ff775f2f2 100644 --- a/builder/vmware/common/step_shutdown_test.go +++ b/builder/vmware/common/step_shutdown_test.go @@ -116,6 +116,10 @@ func TestStepShutdown_noCommand(t *testing.T) { } func TestStepShutdown_locks(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + state := testStepShutdownState(t) step := new(StepShutdown) step.Testing = true diff --git a/builder/vmware/iso/step_create_vmx_test.go b/builder/vmware/iso/step_create_vmx_test.go index 31177957f..7553dff82 100644 --- a/builder/vmware/iso/step_create_vmx_test.go +++ b/builder/vmware/iso/step_create_vmx_test.go @@ -199,6 +199,10 @@ func setupVMwareBuild(t *testing.T, builderConfig map[string]string, provisioner } func TestStepCreateVmx_SerialFile(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + tmpfile := tmpnam("SerialFileInput.") serialConfig := map[string]string{ @@ -223,6 +227,10 @@ func TestStepCreateVmx_SerialFile(t *testing.T) { } func TestStepCreateVmx_SerialPort(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + var defaultSerial string if runtime.GOOS == "windows" { defaultSerial = "COM1" @@ -268,6 +276,10 @@ func TestStepCreateVmx_SerialPort(t *testing.T) { } func TestStepCreateVmx_ParallelPort(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + var 
defaultParallel string if runtime.GOOS == "windows" { defaultParallel = "LPT1" @@ -313,6 +325,10 @@ func TestStepCreateVmx_ParallelPort(t *testing.T) { } func TestStepCreateVmx_Usb(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + config := map[string]string{ "usb": `"TRUE"`, } @@ -351,6 +367,10 @@ func TestStepCreateVmx_Usb(t *testing.T) { } func TestStepCreateVmx_Sound(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1 due to the requirement of access to the VMware binaries.") + } + config := map[string]string{ "sound": `"TRUE"`, } From 8cc0776f3a0d5335a5ef1eb5b35a1f19121fb5e4 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Sun, 10 Sep 2017 16:02:45 -0500 Subject: [PATCH 170/251] Fixed oversight in VMware builder's mock-driver that neglected to initialize 'HostAddressResult'. --- builder/vmware/common/driver_mock.go | 29 ++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 8b533db88..9c63a7bf9 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -1,6 +1,7 @@ package common import ( + "net" "sync" "github.com/hashicorp/packer/helper/multistep" @@ -127,13 +128,41 @@ func (d *DriverMock) CommHost(state multistep.StateBag) (string, error) { return d.CommHostResult, d.CommHostErr } +func MockInterface() net.Interface { + interfaces, err := net.Interfaces() + + // Build a dummy interface due to being unable to enumerate interfaces + if err != nil || len(interfaces) == 0 { + return net.Interface{ + Index: 0, + MTU: -1, + Name: "dummy", + HardwareAddr: net.HardwareAddr{0, 0, 0, 0, 0, 0}, + Flags: net.FlagLoopback, + } + } + + // Find the first loopback interface + for _, intf := range interfaces { + if intf.Flags&net.FlagLoopback == net.FlagLoopback { 
+ return intf + } + } + + // Fall-back to just the first one + return interfaces[0] +} + func (d *DriverMock) HostAddress(state multistep.StateBag) (string, error) { + intf := MockInterface() + d.HostAddressResult = intf.HardwareAddr.String() d.HostAddressCalled = true d.HostAddressState = state return d.HostAddressResult, d.HostAddressErr } func (d *DriverMock) HostIP(state multistep.StateBag) (string, error) { + d.HostIPResult = "127.0.0.1" d.HostIPCalled = true d.HostIPState = state return d.HostIPResult, d.HostIPErr From 069d00f70b871ded6517a5034aa7a4c1c5b7ccfc Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Mon, 16 Oct 2017 11:59:10 -0500 Subject: [PATCH 171/251] Added the paths suggested by @phekmat and @SwampDragons for VMware Fusion... Although parser for the new mapper format is likely to be needed still. --- builder/vmware/common/driver_fusion5.go | 13 ++++++++++++- builder/vmware/common/driver_fusion6.go | 17 +++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index e8c49155c..c6b3780dc 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -141,10 +141,21 @@ func (d *Fusion5Driver) Verify() error { return err } - // default paths + libpath := filepath.Join("Library", "Preferences", "VMware Fusion") + d.VmwareDriver.DhcpLeasesPath = func(device string) string { return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" } + d.VmwareDriver.DhcpConfPath = func(device string) string { + return filepath.Join(libpath, device, "dhcpd.conf") + } + d.VmwareDriver.VmnetnatConfPath = func(device string) string { + return filepath.Join(libpath, device, "nat.conf") + } + d.VmwareDriver.NetmapConfPath = func() string { + // FIXME: Suggested by @phekmat. This will need another parser to be implemented. 
+ return filepath.Join(libpath, "networking") + } return nil } diff --git a/builder/vmware/common/driver_fusion6.go b/builder/vmware/common/driver_fusion6.go index 2f0563eb5..e68bb0de4 100644 --- a/builder/vmware/common/driver_fusion6.go +++ b/builder/vmware/common/driver_fusion6.go @@ -66,6 +66,23 @@ func (d *Fusion6Driver) Verify() error { } log.Printf("Detected VMware version: %s", matches[1]) + libpath := filepath.Join("Library", "Preferences", "VMware Fusion") + + d.VmwareDriver.DhcpLeasesPath = func(device string) string { + return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" + } + d.VmwareDriver.DhcpConfPath = func(device string) string { + return filepath.Join(libpath, device, "dhcpd.conf") + } + + d.VmwareDriver.VmnetnatConfPath = func(device string) string { + return filepath.Join(libpath, device, "nat.conf") + } + d.VmwareDriver.NetmapConfPath = func() string { + // FIXME: Suggested by @phekmat. This will need another parser to be implemented. + return filepath.Join(libpath, "networking") + } + return compareVersions(matches[1], VMWARE_FUSION_VERSION, "Fusion Professional") } From b2fec18b1e32dc37932323d84156fa5b5d1a8d8a Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Mon, 23 Oct 2017 20:26:16 -0500 Subject: [PATCH 172/251] Added parser for VMware Fusion's networking file. Replaced VmwareDriver's NetmapConfPath with a NetworkMapperInterface in order to handle the differences between VMware Fusion and the rest of the VMware suite. 
--- builder/vmware/common/driver.go | 29 +- builder/vmware/common/driver_fusion5.go | 16 +- builder/vmware/common/driver_fusion6.go | 16 +- builder/vmware/common/driver_mock.go | 18 +- builder/vmware/common/driver_parser.go | 847 ++++++++++++++++++- builder/vmware/common/driver_player5.go | 15 +- builder/vmware/common/driver_workstation9.go | 15 +- builder/vmware/iso/step_create_vmx.go | 12 +- 8 files changed, 922 insertions(+), 46 deletions(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 3e38442a2..6b5cfde1f 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -274,7 +274,10 @@ type VmwareDriver struct { DhcpLeasesPath func(string) string DhcpConfPath func(string) string VmnetnatConfPath func(string) string - NetmapConfPath func() string + + /// This method returns an object with the NetworkNameMapper interface + /// that maps network to device and vice-versa. + NetworkMapper func() (NetworkNameMapper, error) } func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string, error) { @@ -304,12 +307,8 @@ func (d *VmwareDriver) GuestAddress(state multistep.StateBag) (string, error) { func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string, error) { - // read netmap config - pathNetmap := d.NetmapConfPath() - if _, err := os.Stat(pathNetmap); err != nil { - return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) - } - netmap, err := ReadNetmapConfig(pathNetmap) + // grab network mapper + netmap, err := d.NetworkMapper() if err != nil { return "", err } @@ -401,12 +400,8 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string, error) { func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string, error) { - // parse network<->device mapping - pathNetmap := d.NetmapConfPath() - if _, err := os.Stat(pathNetmap); err != nil { - return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) - } - netmap, err := 
ReadNetmapConfig(pathNetmap) + // grab mapper for converting network<->device + netmap, err := d.NetworkMapper() if err != nil { return "", err } @@ -471,12 +466,8 @@ func (d *VmwareDriver) HostAddress(state multistep.StateBag) (string, error) { func (d *VmwareDriver) HostIP(state multistep.StateBag) (string, error) { - // parse network<->device mapping - pathNetmap := d.NetmapConfPath() - if _, err := os.Stat(pathNetmap); err != nil { - return "", fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) - } - netmap, err := ReadNetmapConfig(pathNetmap) + // grab mapper for converting network<->device + netmap, err := d.NetworkMapper() if err != nil { return "", err } diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index c6b3780dc..c450e1f61 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -152,9 +152,19 @@ func (d *Fusion5Driver) Verify() error { d.VmwareDriver.VmnetnatConfPath = func(device string) string { return filepath.Join(libpath, device, "nat.conf") } - d.VmwareDriver.NetmapConfPath = func() string { - // FIXME: Suggested by @phekmat. This will need another parser to be implemented. 
- return filepath.Join(libpath, "networking") + d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) { + pathNetworking := filepath.Join(libpath, "networking") + if _, err := os.Stat(pathNetworking); err != nil { + return nil, fmt.Errorf("Could not find networking conf file: %s", pathNetworking) + } + + fd, err := os.Open(pathNetworking) + if err != nil { + return nil, err + } + defer fd.Close() + + return ReadNetworkingConfig(fd) } return nil diff --git a/builder/vmware/common/driver_fusion6.go b/builder/vmware/common/driver_fusion6.go index e68bb0de4..62dd7b218 100644 --- a/builder/vmware/common/driver_fusion6.go +++ b/builder/vmware/common/driver_fusion6.go @@ -78,9 +78,19 @@ func (d *Fusion6Driver) Verify() error { d.VmwareDriver.VmnetnatConfPath = func(device string) string { return filepath.Join(libpath, device, "nat.conf") } - d.VmwareDriver.NetmapConfPath = func() string { - // FIXME: Suggested by @phekmat. This will need another parser to be implemented. - return filepath.Join(libpath, "networking") + d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) { + pathNetworking := filepath.Join(libpath, "networking") + if _, err := os.Stat(pathNetworking); err != nil { + return nil, fmt.Errorf("Could not find networking conf file: %s", pathNetworking) + } + + fd, err := os.Open(pathNetworking) + if err != nil { + return nil, err + } + defer fd.Close() + + return ReadNetworkingConfig(fd) } return compareVersions(matches[1], VMWARE_FUSION_VERSION, "Fusion Professional") diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 9c63a7bf9..f857f4adc 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -92,6 +92,20 @@ type DriverMock struct { VerifyErr error } +type NetworkMapperMock struct { + NameIntoDeviceCalled int + DeviceIntoNameCalled int +} + +func (m NetworkMapperMock) NameIntoDevice(name string) (string, error) { + m.NameIntoDeviceCalled += 1 + return "", 
nil +} +func (m NetworkMapperMock) DeviceIntoName(device string) (string, error) { + m.DeviceIntoNameCalled += 1 + return "", nil +} + func (d *DriverMock) Clone(dst string, src string) error { d.CloneCalled = true d.CloneDst = dst @@ -247,8 +261,8 @@ func (d *DriverMock) GetVmwareDriver() VmwareDriver { state.VmnetnatConfPath = func(string) string { return "/path/to/vmnetnat.conf" } - state.NetmapConfPath = func() string { - return "/path/to/netmap.conf" + state.NetworkMapper = func() (NetworkNameMapper, error) { + return NetworkMapperMock{}, nil } return state } diff --git a/builder/vmware/common/driver_parser.go b/builder/vmware/common/driver_parser.go index dbf15b5b5..7a026fea8 100644 --- a/builder/vmware/common/driver_parser.go +++ b/builder/vmware/common/driver_parser.go @@ -3,8 +3,11 @@ package common import ( "fmt" "io" + "log" + "math" "net" "os" + "reflect" "sort" "strconv" "strings" @@ -1053,6 +1056,11 @@ func (e *DhcpConfiguration) HostByName(host string) (configDeclaration, error) { /*** Network Map */ type NetworkMap []map[string]string +type NetworkNameMapper interface { + NameIntoDevice(string) (string, error) + DeviceIntoName(string) (string, error) +} + func ReadNetworkMap(fd *os.File) (NetworkMap, error) { fromfile, eof := consumeFile(fd) @@ -1066,16 +1074,16 @@ func ReadNetworkMap(fd *os.File) (NetworkMap, error) { return result, nil } -func (e *NetworkMap) NameIntoDevice(name string) (string, error) { - for _, val := range *e { +func (e NetworkMap) NameIntoDevice(name string) (string, error) { + for _, val := range e { if strings.ToLower(val["name"]) == strings.ToLower(name) { return val["device"], nil } } return "", fmt.Errorf("Network name not found : %v", name) } -func (e *NetworkMap) DeviceIntoName(device string) (string, error) { - for _, val := range *e { +func (e NetworkMap) DeviceIntoName(device string) (string, error) { + for _, val := range e { if strings.ToLower(val["device"]) == strings.ToLower(device) { return val["name"], nil } @@ 
-1091,7 +1099,836 @@ func (e *NetworkMap) repr() string { return strings.Join(result, "\n") } -/** main */ +/*** parser for VMware Fusion's networking file */ +func tokenizeNetworkingConfig(eof sentinelSignaller, in chan byte) chan string { + var ch byte + var state string + var repeat_newline bool + + out := make(chan string) + go func(out chan string) { + for reading := true; reading; { + select { + case <-eof: + reading = false + + case ch = <-in: + switch ch { + case '\r': + fallthrough + case '\t': + fallthrough + case ' ': + if len(state) == 0 { + continue + } + out <- state + state = "" + case '\n': + if repeat_newline { + continue + } + if len(state) > 0 { + out <- state + } + out <- string(ch) + state = "" + repeat_newline = true + continue + default: + state += string(ch) + } + repeat_newline = false + } + } + if len(state) > 0 { + out <- state + } + }(out) + return out +} + +func splitNetworkingConfig(eof sentinelSignaller, in chan string) chan []string { + var out chan []string + + out = make(chan []string) + go func(out chan []string) { + row := make([]string, 0) + for reading := true; reading; { + select { + case <-eof: + reading = false + + case tk := <-in: + switch tk { + case "\n": + if len(row) > 0 { + out <- row + } + row = make([]string, 0) + default: + row = append(row, tk) + } + } + } + if len(row) > 0 { + out <- row + } + }(out) + return out +} + +/// All token types in networking file. 
+// VERSION token +type networkingVERSION struct { + value string +} + +func networkingReadVersion(row []string) (*networkingVERSION, error) { + if len(row) != 1 { + return nil, fmt.Errorf("Unexpected format for VERSION entry : %v", row) + } + res := &networkingVERSION{value: row[0]} + if !res.Valid() { + return nil, fmt.Errorf("Unexpected format for VERSION entry : %v", row) + } + return res, nil +} + +func (s networkingVERSION) Repr() string { + if !s.Valid() { + return fmt.Sprintf("VERSION{INVALID=\"%v\"}", s.value) + } + return fmt.Sprintf("VERSION{%f}", s.Number()) +} + +func (s networkingVERSION) Valid() bool { + tokens := strings.SplitN(s.value, "=", 2) + if len(tokens) != 2 || tokens[0] != "VERSION" { + return false + } + + tokens = strings.Split(tokens[1], ",") + if len(tokens) != 2 { + return false + } + + for _, t := range tokens { + _, err := strconv.ParseUint(t, 10, 64) + if err != nil { + return false + } + } + return true +} + +func (s networkingVERSION) Number() float64 { + var result float64 + tokens := strings.SplitN(s.value, "=", 2) + tokens = strings.Split(tokens[1], ",") + + integer, err := strconv.ParseUint(tokens[0], 10, 64) + if err != nil { + integer = 0 + } + result = float64(integer) + + mantissa, err := strconv.ParseUint(tokens[1], 10, 64) + if err != nil { + return result + } + denomination := math.Pow(10.0, float64(len(tokens[1]))) + return result + (float64(mantissa) / denomination) +} + +// VNET_X token +type networkingVNET struct { + value string +} + +func (s networkingVNET) Valid() bool { + if strings.ToUpper(s.value) != s.value { + return false + } + tokens := strings.SplitN(s.value, "_", 3) + if len(tokens) != 3 || tokens[0] != "VNET" { + return false + } + _, err := strconv.ParseUint(tokens[1], 10, 64) + if err != nil { + return false + } + return true +} + +func (s networkingVNET) Number() int { + tokens := strings.SplitN(s.value, "_", 3) + res, err := strconv.Atoi(tokens[1]) + if err != nil { + return ^int(0) + } + return res 
+} + +func (s networkingVNET) Option() string { + tokens := strings.SplitN(s.value, "_", 3) + if len(tokens) == 3 { + return tokens[2] + } + return "" +} + +func (s networkingVNET) Repr() string { + if !s.Valid() { + tokens := strings.SplitN(s.value, "_", 3) + return fmt.Sprintf("VNET{INVALID=%v}", tokens) + } + return fmt.Sprintf("VNET{%d} %s", s.Number(), s.Option()) +} + +// Interface name +type networkingInterface struct { + name string +} + +func (s networkingInterface) Interface() (*net.Interface, error) { + return net.InterfaceByName(s.name) +} + +// networking command entry types +type networkingCommandEntry_answer struct { + vnet networkingVNET + value string +} +type networkingCommandEntry_remove_answer struct { + vnet networkingVNET +} +type networkingCommandEntry_add_nat_portfwd struct { + vnet int + protocol string + port int + target_host net.IP + target_port int +} +type networkingCommandEntry_remove_nat_portfwd struct { + vnet int + protocol string + port int +} +type networkingCommandEntry_add_dhcp_mac_to_ip struct { + vnet int + mac net.HardwareAddr + ip net.IP +} +type networkingCommandEntry_remove_dhcp_mac_to_ip struct { + vnet int + mac net.HardwareAddr +} +type networkingCommandEntry_add_bridge_mapping struct { + intf networkingInterface + vnet int +} +type networkingCommandEntry_remove_bridge_mapping struct { + intf networkingInterface +} +type networkingCommandEntry_add_nat_prefix struct { + vnet int + prefix int +} +type networkingCommandEntry_remove_nat_prefix struct { + vnet int + prefix int +} + +type networkingCommandEntry struct { + entry interface{} + answer *networkingCommandEntry_answer + remove_answer *networkingCommandEntry_remove_answer + add_nat_portfwd *networkingCommandEntry_add_nat_portfwd + remove_nat_portfwd *networkingCommandEntry_remove_nat_portfwd + add_dhcp_mac_to_ip *networkingCommandEntry_add_dhcp_mac_to_ip + remove_dhcp_mac_to_ip *networkingCommandEntry_remove_dhcp_mac_to_ip + add_bridge_mapping 
*networkingCommandEntry_add_bridge_mapping + remove_bridge_mapping *networkingCommandEntry_remove_bridge_mapping + add_nat_prefix *networkingCommandEntry_add_nat_prefix + remove_nat_prefix *networkingCommandEntry_remove_nat_prefix +} + +func (e networkingCommandEntry) Name() string { + switch e.entry.(type) { + case networkingCommandEntry_answer: + return "answer" + case networkingCommandEntry_remove_answer: + return "remove_answer" + case networkingCommandEntry_add_nat_portfwd: + return "add_nat_portfwd" + case networkingCommandEntry_remove_nat_portfwd: + return "remove_nat_portfwd" + case networkingCommandEntry_add_dhcp_mac_to_ip: + return "add_dhcp_mac_to_ip" + case networkingCommandEntry_remove_dhcp_mac_to_ip: + return "remove_dhcp_mac_to_ip" + case networkingCommandEntry_add_bridge_mapping: + return "add_bridge_mapping" + case networkingCommandEntry_remove_bridge_mapping: + return "remove_bridge_mapping" + case networkingCommandEntry_add_nat_prefix: + return "add_nat_prefix" + case networkingCommandEntry_remove_nat_prefix: + return "remove_nat_prefix" + } + return "" +} + +func (e networkingCommandEntry) Entry() reflect.Value { + this := reflect.ValueOf(e) + switch e.entry.(type) { + case networkingCommandEntry_answer: + return reflect.Indirect(this.FieldByName("answer")) + case networkingCommandEntry_remove_answer: + return reflect.Indirect(this.FieldByName("remove_answer")) + case networkingCommandEntry_add_nat_portfwd: + return reflect.Indirect(this.FieldByName("add_nat_portfwd")) + case networkingCommandEntry_remove_nat_portfwd: + return reflect.Indirect(this.FieldByName("remove_nat_portfwd")) + case networkingCommandEntry_add_dhcp_mac_to_ip: + return reflect.Indirect(this.FieldByName("add_dhcp_mac_to_ip")) + case networkingCommandEntry_remove_dhcp_mac_to_ip: + return reflect.Indirect(this.FieldByName("remove_dhcp_mac_to_ip")) + case networkingCommandEntry_add_bridge_mapping: + return reflect.Indirect(this.FieldByName("add_bridge_mapping")) + case 
networkingCommandEntry_remove_bridge_mapping: + return reflect.Indirect(this.FieldByName("remove_bridge_mapping")) + case networkingCommandEntry_add_nat_prefix: + return reflect.Indirect(this.FieldByName("add_nat_prefix")) + case networkingCommandEntry_remove_nat_prefix: + return reflect.Indirect(this.FieldByName("remove_nat_prefix")) + } + return reflect.Value{} +} + +func (e networkingCommandEntry) Repr() string { + var result map[string]interface{} + result = make(map[string]interface{}) + + entryN, entry := e.Name(), e.Entry() + entryT := entry.Type() + for i := 0; i < entry.NumField(); i++ { + fld, fldT := entry.Field(i), entryT.Field(i) + result[fldT.Name] = fld + } + return fmt.Sprintf("%s -> %v", entryN, result) +} + +// networking command entry parsers +func parseNetworkingCommand_answer(row []string) (*networkingCommandEntry, error) { + if len(row) != 2 { + return nil, fmt.Errorf("Expected %d arguments. Received only %d.", 2, len(row)) + } + vnet := networkingVNET{value: row[0]} + if !vnet.Valid() { + return nil, fmt.Errorf("Invalid format for VNET.") + } + value := row[1] + + result := networkingCommandEntry_answer{vnet: vnet, value: value} + return &networkingCommandEntry{entry: result, answer: &result}, nil +} +func parseNetworkingCommand_remove_answer(row []string) (*networkingCommandEntry, error) { + if len(row) != 1 { + return nil, fmt.Errorf("Expected %d argument. Received %d.", 1, len(row)) + } + vnet := networkingVNET{value: row[0]} + if !vnet.Valid() { + return nil, fmt.Errorf("Invalid format for VNET.") + } + + result := networkingCommandEntry_remove_answer{vnet: vnet} + return &networkingCommandEntry{entry: result, remove_answer: &result}, nil +} +func parseNetworkingCommand_add_nat_portfwd(row []string) (*networkingCommandEntry, error) { + if len(row) != 5 { + return nil, fmt.Errorf("Expected %d arguments. 
Received only %d.", 5, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + protocol := strings.ToLower(row[1]) + if !(protocol == "tcp" || protocol == "udp") { + return nil, fmt.Errorf("Expected \"tcp\" or \"udp\" for second argument. : %v", row[1]) + } + + sport, err := strconv.Atoi(row[2]) + if err != nil { + return nil, fmt.Errorf("Unable to parse third argument as an integer. : %v", row[2]) + } + + dest := net.ParseIP(row[3]) + if dest == nil { + return nil, fmt.Errorf("Unable to parse fourth argument as an IPv4 address. : %v", row[2]) + } + + dport, err := strconv.Atoi(row[4]) + if err != nil { + return nil, fmt.Errorf("Unable to parse fifth argument as an integer. : %v", row[4]) + } + + result := networkingCommandEntry_add_nat_portfwd{vnet: vnet - 1, protocol: protocol, port: sport, target_host: dest, target_port: dport} + return &networkingCommandEntry{entry: result, add_nat_portfwd: &result}, nil +} +func parseNetworkingCommand_remove_nat_portfwd(row []string) (*networkingCommandEntry, error) { + if len(row) != 3 { + return nil, fmt.Errorf("Expected %d arguments. Received only %d.", 3, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + protocol := strings.ToLower(row[1]) + if !(protocol == "tcp" || protocol == "udp") { + return nil, fmt.Errorf("Expected \"tcp\" or \"udp\" for second argument. : %v", row[1]) + } + + sport, err := strconv.Atoi(row[2]) + if err != nil { + return nil, fmt.Errorf("Unable to parse third argument as an integer. 
: %v", row[2]) + } + + result := networkingCommandEntry_remove_nat_portfwd{vnet: vnet - 1, protocol: protocol, port: sport} + return &networkingCommandEntry{entry: result, remove_nat_portfwd: &result}, nil +} +func parseNetworkingCommand_add_dhcp_mac_to_ip(row []string) (*networkingCommandEntry, error) { + if len(row) != 3 { + return nil, fmt.Errorf("Expected %d arguments. Received only %d.", 3, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + mac, err := net.ParseMAC(row[1]) + if err != nil { + return nil, fmt.Errorf("Unable to parse second argument as hwaddr. : %v", row[1]) + } + + ip := net.ParseIP(row[2]) + if ip != nil { + return nil, fmt.Errorf("Unable to parse third argument as ipv4. : %v", row[2]) + } + + result := networkingCommandEntry_add_dhcp_mac_to_ip{vnet: vnet - 1, mac: mac, ip: ip} + return &networkingCommandEntry{entry: result, add_dhcp_mac_to_ip: &result}, nil +} +func parseNetworkingCommand_remove_dhcp_mac_to_ip(row []string) (*networkingCommandEntry, error) { + if len(row) != 2 { + return nil, fmt.Errorf("Expected %d arguments. Received only %d.", 2, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + mac, err := net.ParseMAC(row[1]) + if err != nil { + return nil, fmt.Errorf("Unable to parse second argument as hwaddr. : %v", row[1]) + } + + result := networkingCommandEntry_remove_dhcp_mac_to_ip{vnet: vnet - 1, mac: mac} + return &networkingCommandEntry{entry: result, remove_dhcp_mac_to_ip: &result}, nil +} +func parseNetworkingCommand_add_bridge_mapping(row []string) (*networkingCommandEntry, error) { + if len(row) != 2 { + return nil, fmt.Errorf("Expected %d arguments. 
Received only %d.", 2, len(row)) + } + intf := networkingInterface{name: row[0]} + + vnet, err := strconv.Atoi(row[1]) + if err != nil { + return nil, fmt.Errorf("Unable to parse second argument as an integer. : %v", row[2]) + } + + result := networkingCommandEntry_add_bridge_mapping{intf: intf, vnet: vnet - 1} + return &networkingCommandEntry{entry: result, add_bridge_mapping: &result}, nil +} +func parseNetworkingCommand_remove_bridge_mapping(row []string) (*networkingCommandEntry, error) { + if len(row) != 1 { + return nil, fmt.Errorf("Expected %d argument. Received %d.", 1, len(row)) + } + intf := networkingInterface{name: row[0]} + /* + number, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + */ + result := networkingCommandEntry_remove_bridge_mapping{intf: intf} + return &networkingCommandEntry{entry: result, remove_bridge_mapping: &result}, nil +} +func parseNetworkingCommand_add_nat_prefix(row []string) (*networkingCommandEntry, error) { + if len(row) != 2 { + return nil, fmt.Errorf("Expected %d arguments. Received only %d.", 2, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + if !strings.HasPrefix(row[1], "/") { + return nil, fmt.Errorf("Expected second argument to begin with \"/\". : %v", row[1]) + } + + prefix, err := strconv.Atoi(row[1][1:]) + if err != nil { + return nil, fmt.Errorf("Unable to parse prefix out of second argument. : %v", row[1]) + } + + result := networkingCommandEntry_add_nat_prefix{vnet: vnet - 1, prefix: prefix} + return &networkingCommandEntry{entry: result, add_nat_prefix: &result}, nil +} +func parseNetworkingCommand_remove_nat_prefix(row []string) (*networkingCommandEntry, error) { + if len(row) != 2 { + return nil, fmt.Errorf("Expected %d arguments. 
Received only %d.", 2, len(row)) + } + + vnet, err := strconv.Atoi(row[0]) + if err != nil { + return nil, fmt.Errorf("Unable to parse first argument as an integer. : %v", row[0]) + } + + if !strings.HasPrefix(row[1], "/") { + return nil, fmt.Errorf("Expected second argument to begin with \"/\". : %v", row[1]) + } + prefix, err := strconv.Atoi(row[1][1:]) + if err != nil { + return nil, fmt.Errorf("Unable to parse prefix out of second argument. : %v", row[1]) + } + + result := networkingCommandEntry_remove_nat_prefix{vnet: vnet - 1, prefix: prefix} + return &networkingCommandEntry{entry: result, remove_nat_prefix: &result}, nil +} + +type networkingCommandParser struct { + command string + callback func([]string) (*networkingCommandEntry, error) +} + +var NetworkingCommandParsers = []networkingCommandParser{ + /* DictRecordParseFunct */ {command: "answer", callback: parseNetworkingCommand_answer}, + /* DictRecordParseFunct */ {command: "remove_answer", callback: parseNetworkingCommand_remove_answer}, + /* NatFwdRecordParseFunct */ {command: "add_nat_portfwd", callback: parseNetworkingCommand_add_nat_portfwd}, + /* NatFwdRecordParseFunct */ {command: "remove_nat_portfwd", callback: parseNetworkingCommand_remove_nat_portfwd}, + /* DhcpMacRecordParseFunct */ {command: "add_dhcp_mac_to_ip", callback: parseNetworkingCommand_add_dhcp_mac_to_ip}, + /* DhcpMacRecordParseFunct */ {command: "remove_dhcp_mac_to_ip", callback: parseNetworkingCommand_remove_dhcp_mac_to_ip}, + /* BridgeMappingRecordParseFunct */ {command: "add_bridge_mapping", callback: parseNetworkingCommand_add_bridge_mapping}, + /* BridgeMappingRecordParseFunct */ {command: "remove_bridge_mapping", callback: parseNetworkingCommand_remove_bridge_mapping}, + /* NatPrefixRecordParseFunct */ {command: "add_nat_prefix", callback: parseNetworkingCommand_add_nat_prefix}, + /* NatPrefixRecordParseFunct */ {command: "remove_nat_prefix", callback: parseNetworkingCommand_remove_nat_prefix}, +} + +func 
NetworkingParserByCommand(command string) *func([]string) (*networkingCommandEntry, error) { + for _, p := range NetworkingCommandParsers { + if p.command == command { + return &p.callback + } + } + return nil +} + +func parseNetworkingConfig(eof sentinelSignaller, rows chan []string) chan networkingCommandEntry { + var out chan networkingCommandEntry + + out = make(chan networkingCommandEntry) + go func(in chan []string, out chan networkingCommandEntry) { + for reading := true; reading; { + select { + case <-eof: + reading = false + case row := <-in: + if len(row) >= 1 { + parser := NetworkingParserByCommand(row[0]) + if parser == nil { + log.Printf("Invalid command : %v", row) + continue + } + callback := *parser + entry, err := callback(row[1:]) + if err != nil { + log.Printf("Unable to parse command : %v %v", err, row) + continue + } + out <- *entry + } + } + } + }(rows, out) + return out +} + +type NetworkingConfig struct { + answer map[int]map[string]string + nat_portfwd map[int]map[string]string + dhcp_mac_to_ip map[int]map[string]net.IP + //bridge_mapping map[net.Interface]uint64 // XXX: we don't need the actual interface for anything but informing the user. 
+ bridge_mapping map[string]int + nat_prefix map[int][]int +} + +func (c NetworkingConfig) repr() string { + return fmt.Sprintf("answer -> %v\nnat_portfwd -> %v\ndhcp_mac_to_ip -> %v\nbridge_mapping -> %v\nnat_prefix -> %v", c.answer, c.nat_portfwd, c.dhcp_mac_to_ip, c.bridge_mapping, c.nat_prefix) +} + +func flattenNetworkingConfig(eof sentinelSignaller, in chan networkingCommandEntry) NetworkingConfig { + var result NetworkingConfig + var vmnet int + + result.answer = make(map[int]map[string]string) + result.nat_portfwd = make(map[int]map[string]string) + result.dhcp_mac_to_ip = make(map[int]map[string]net.IP) + result.bridge_mapping = make(map[string]int) + result.nat_prefix = make(map[int][]int) + + for reading := true; reading; { + select { + case <-eof: + reading = false + case e := <-in: + switch e.entry.(type) { + case networkingCommandEntry_answer: + vnet := e.answer.vnet + answers, exists := result.answer[vnet.Number()] + if !exists { + answers = make(map[string]string) + result.answer[vnet.Number()] = answers + } + answers[vnet.Option()] = e.answer.value + case networkingCommandEntry_remove_answer: + vnet := e.remove_answer.vnet + answers, exists := result.answer[vnet.Number()] + if exists { + delete(answers, vnet.Option()) + } else { + log.Printf("Unable to remove answer %s as specified by `remove_answer`.\n", vnet.Repr()) + } + case networkingCommandEntry_add_nat_portfwd: + vmnet = e.add_nat_portfwd.vnet + protoport := fmt.Sprintf("%s/%d", e.add_nat_portfwd.protocol, e.add_nat_portfwd.port) + target := fmt.Sprintf("%s:%d", e.add_nat_portfwd.target_host, e.add_nat_portfwd.target_port) + portfwds, exists := result.nat_portfwd[vmnet] + if !exists { + portfwds = make(map[string]string) + result.nat_portfwd[vmnet] = portfwds + } + portfwds[protoport] = target + case networkingCommandEntry_remove_nat_portfwd: + vmnet = e.remove_nat_portfwd.vnet + protoport := fmt.Sprintf("%s/%d", e.remove_nat_portfwd.protocol, e.remove_nat_portfwd.port) + portfwds, exists := 
result.nat_portfwd[vmnet] + if exists { + delete(portfwds, protoport) + } else { + log.Printf("Unable to remove nat port-forward %s from interface %s%d as requested by `remove_nat_portfwd`.\n", protoport, NetworkingInterfacePrefix, vmnet) + } + case networkingCommandEntry_add_dhcp_mac_to_ip: + vmnet = e.add_dhcp_mac_to_ip.vnet + dhcpmacs, exists := result.dhcp_mac_to_ip[vmnet] + if !exists { + dhcpmacs = make(map[string]net.IP) + result.dhcp_mac_to_ip[vmnet] = dhcpmacs + } + dhcpmacs[e.add_dhcp_mac_to_ip.mac.String()] = e.add_dhcp_mac_to_ip.ip + case networkingCommandEntry_remove_dhcp_mac_to_ip: + vmnet = e.remove_dhcp_mac_to_ip.vnet + dhcpmacs, exists := result.dhcp_mac_to_ip[vmnet] + if exists { + delete(dhcpmacs, e.remove_dhcp_mac_to_ip.mac.String()) + } else { + log.Printf("Unable to remove dhcp_mac_to_ip entry %v from interface %s%d as specified by `remove_dhcp_mac_to_ip`.\n", e.remove_dhcp_mac_to_ip, NetworkingInterfacePrefix, vmnet) + } + case networkingCommandEntry_add_bridge_mapping: + intf := e.add_bridge_mapping.intf + if _, err := intf.Interface(); err != nil { + log.Printf("Interface \"%s\" as specified by `add_bridge_mapping` was not found on the current platform. This is a non-critical error. Ignoring.", intf.name) + } + result.bridge_mapping[intf.name] = e.add_bridge_mapping.vnet + case networkingCommandEntry_remove_bridge_mapping: + intf := e.remove_bridge_mapping.intf + if _, err := intf.Interface(); err != nil { + log.Printf("Interface \"%s\" as specified by `remove_bridge_mapping` was not found on the current platform. This is a non-critical error. 
Ignoring.", intf.name) + } + delete(result.bridge_mapping, intf.name) + case networkingCommandEntry_add_nat_prefix: + vmnet = e.add_nat_prefix.vnet + _, exists := result.nat_prefix[vmnet] + if exists { + result.nat_prefix[vmnet] = append(result.nat_prefix[vmnet], e.add_nat_prefix.prefix) + } else { + result.nat_prefix[vmnet] = []int{e.add_nat_prefix.prefix} + } + case networkingCommandEntry_remove_nat_prefix: + vmnet = e.remove_nat_prefix.vnet + prefixes, exists := result.nat_prefix[vmnet] + if exists { + for index := 0; index < len(prefixes); index++ { + if prefixes[index] == e.remove_nat_prefix.prefix { + result.nat_prefix[vmnet] = append(prefixes[:index], prefixes[index+1:]...) + break + } + } + } else { + log.Printf("Unable to remove nat prefix /%d from interface %s%d as specified by `remove_nat_prefix`.\n", e.remove_nat_prefix.prefix, NetworkingInterfacePrefix, vmnet) + } + } + } + } + return result +} + +// Constructor for networking file +func ReadNetworkingConfig(fd *os.File) (NetworkingConfig, error) { + // start piecing together different parts of the file + fromfile, eof := consumeFile(fd) + tokenized := tokenizeNetworkingConfig(eof, fromfile) + rows := splitNetworkingConfig(eof, tokenized) + entries := parseNetworkingConfig(eof, rows) + + // parse the version + parsed_version, err := networkingReadVersion(<-rows) + if err != nil { + return NetworkingConfig{}, err + } + + // verify that it's 1.0 since that's all we support. + version := parsed_version.Number() + if version != 1.0 { + return NetworkingConfig{}, fmt.Errorf("Expected version %f of networking file. 
Received version %f.", 1.0, version) + } + + // convert to a configuration + result := flattenNetworkingConfig(eof, entries) + return result, nil +} + +// netmapper interface +type NetworkingType int + +const ( + NetworkingType_HOSTONLY = iota + 1 + NetworkingType_NAT + NetworkingType_BRIDGED +) + +func networkingConfig_InterfaceTypes(config NetworkingConfig) map[int]NetworkingType { + var result map[int]NetworkingType + result = make(map[int]NetworkingType) + + // defaults + result[0] = NetworkingType_BRIDGED + result[1] = NetworkingType_HOSTONLY + result[8] = NetworkingType_NAT + + // walk through config collecting bridged interfaces + for _, vmnet := range config.bridge_mapping { + result[vmnet] = NetworkingType_BRIDGED + } + + // walk through answers finding out which ones are nat versus hostonly + for vmnet, table := range config.answer { + // everything should be defined as a virtual adapter... + if table["VIRTUAL_ADAPTER"] == "yes" { + + // validate that the VNET entry contains everything we expect it to + _, subnetQ := table["HOSTONLY_SUBNET"] + _, netmaskQ := table["HOSTONLY_NETMASK"] + if !(subnetQ && netmaskQ) { + log.Printf("Interface %s%d is missing some expected keys (HOSTONLY_SUBNET, HOSTONLY_NETMASK). This is non-critical. Ignoring..", NetworkingInterfacePrefix, vmnet) + } + + // distinguish between nat or hostonly + if table["NAT"] == "yes" { + result[vmnet] = NetworkingType_NAT + } else { + result[vmnet] = NetworkingType_HOSTONLY + } + + // if it's not a virtual_adapter, then it must be an alias (really a bridge). 
+ } else { + result[vmnet] = NetworkingType_BRIDGED + } + } + return result +} + +func networkingConfig_NamesToVmnet(config NetworkingConfig) map[NetworkingType][]int { + types := networkingConfig_InterfaceTypes(config) + + // now sort the keys + var keys []int + for vmnet := range types { + keys = append(keys, vmnet) + } + sort.Ints(keys) + + // build result dictionary + var result map[NetworkingType][]int + result = make(map[NetworkingType][]int) + for i := 0; i < len(keys); i++ { + t := types[keys[i]] + result[t] = append(result[t], keys[i]) + } + return result +} + +const NetworkingInterfacePrefix = "vmnet" + +func (e NetworkingConfig) NameIntoDevice(name string) (string, error) { + netmapper := networkingConfig_NamesToVmnet(e) + name = strings.ToLower(name) + + var vmnet int + if name == "hostonly" && len(netmapper[NetworkingType_HOSTONLY]) > 0 { + vmnet = netmapper[NetworkingType_HOSTONLY][0] + } else if name == "nat" && len(netmapper[NetworkingType_NAT]) > 0 { + vmnet = netmapper[NetworkingType_NAT][0] + } else if name == "bridged" && len(netmapper[NetworkingType_BRIDGED]) > 0 { + vmnet = netmapper[NetworkingType_BRIDGED][0] + } else { + return "", fmt.Errorf("Network name not found : %v", name) + } + return fmt.Sprintf("%s%d", NetworkingInterfacePrefix, vmnet), nil +} + +func (e NetworkingConfig) DeviceIntoName(device string) (string, error) { + types := networkingConfig_InterfaceTypes(e) + + lowerdevice := strings.ToLower(device) + if !strings.HasPrefix(lowerdevice, NetworkingInterfacePrefix) { + return device, nil + } + vmnet, err := strconv.Atoi(lowerdevice[len(NetworkingInterfacePrefix):]) + if err != nil { + return "", err + } + network := types[vmnet] + switch network { + case NetworkingType_HOSTONLY: + return "hostonly", nil + case NetworkingType_NAT: + return "nat", nil + case NetworkingType_BRIDGED: + return "bridged", nil + } + return "", fmt.Errorf("Unable to determine network type for device %s%d.", NetworkingInterfacePrefix, vmnet) +} + +/** 
generic async file reader */ func consumeFile(fd *os.File) (chan byte, sentinelSignaller) { fromfile := make(chan byte) eof := make(sentinelSignaller) diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 04fb4e897..2814cbad3 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -196,8 +196,19 @@ func (d *Player5Driver) Verify() error { return playerVmnetnatConfPath(device) } - d.VmwareDriver.NetmapConfPath = func() string { - return playerNetmapConfPath() + d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) { + pathNetmap := playerNetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + return nil, fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + } + + fd, err := os.Open(pathNetmap) + if err != nil { + return nil, err + } + defer fd.Close() + + return ReadNetworkMap(fd) } return nil } diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index af2d7df35..c2c905922 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -157,8 +157,19 @@ func (d *Workstation9Driver) Verify() error { return workstationVmnetnatConfPath(device) } - d.VmwareDriver.NetmapConfPath = func() string { - return workstationNetmapConfPath() + d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) { + pathNetmap := workstationNetmapConfPath() + if _, err := os.Stat(pathNetmap); err != nil { + return nil, fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) + } + + fd, err := os.Open(pathNetmap) + if err != nil { + return nil, err + } + defer fd.Close() + + return ReadNetworkMap(fd) } return nil } diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 137b0c736..7728260ad 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -373,16 +373,8 @@ func (s 
*stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist driver := state.Get("driver").(vmwcommon.Driver).GetVmwareDriver() // read netmap config - pathNetmap := driver.NetmapConfPath() - if _, err := os.Stat(pathNetmap); err != nil { - err := fmt.Errorf("Could not find netmap conf file: %s", pathNetmap) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - netmap, res := vmwcommon.ReadNetmapConfig(pathNetmap) - if res != nil { - err := fmt.Errorf("Unable to read netmap conf file: %s: %v", pathNetmap, res) + netmap, err := driver.NetworkMapper() + if err != nil { state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt From 737e951685e1ddb89e130668634de493e6e6db09 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 17 Nov 2017 10:26:40 -0600 Subject: [PATCH 173/251] Added missing root path to path-finders for the VMware Fusion implementation in the vmware builder as mentioned by @SwampDragons. --- builder/vmware/common/driver_fusion5.go | 2 +- builder/vmware/common/driver_fusion6.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index c450e1f61..e3c0f061e 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -141,7 +141,7 @@ func (d *Fusion5Driver) Verify() error { return err } - libpath := filepath.Join("Library", "Preferences", "VMware Fusion") + libpath := filepath.Join("/", "Library", "Preferences", "VMware Fusion") d.VmwareDriver.DhcpLeasesPath = func(device string) string { return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" diff --git a/builder/vmware/common/driver_fusion6.go b/builder/vmware/common/driver_fusion6.go index 62dd7b218..1f959d35d 100644 --- a/builder/vmware/common/driver_fusion6.go +++ b/builder/vmware/common/driver_fusion6.go @@ -66,7 +66,7 @@ func (d *Fusion6Driver) Verify() error { } log.Printf("Detected VMware 
version: %s", matches[1]) - libpath := filepath.Join("Library", "Preferences", "VMware Fusion") + libpath := filepath.Join("/", "Library", "Preferences", "VMware Fusion") d.VmwareDriver.DhcpLeasesPath = func(device string) string { return "/var/db/vmware/vmnet-dhcpd-" + device + ".leases" From 594ed950c7bcb076ef70704d8a3ef9b9a6a7eb4d Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Wed, 22 Nov 2017 17:11:13 -0600 Subject: [PATCH 174/251] Fixed a race condition in builder/vmware/common/driver_parser.go due to a misunderstanding how channels work when you close them. --- builder/vmware/common/driver_parser.go | 61 ++++++++++++++++---------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/builder/vmware/common/driver_parser.go b/builder/vmware/common/driver_parser.go index 7a026fea8..24ecaf0df 100644 --- a/builder/vmware/common/driver_parser.go +++ b/builder/vmware/common/driver_parser.go @@ -17,8 +17,9 @@ type sentinelSignaller chan struct{} /** low-level parsing */ // strip the comments and extraneous newlines from a byte channel -func uncomment(eof sentinelSignaller, in <-chan byte) chan byte { +func uncomment(eof sentinelSignaller, in <-chan byte) (chan byte, sentinelSignaller) { out := make(chan byte) + eoc := make(sentinelSignaller) go func(in <-chan byte, out chan byte) { var endofline bool @@ -40,16 +41,19 @@ func uncomment(eof sentinelSignaller, in <-chan byte) chan byte { } } } + close(eoc) }(in, out) - return out + return out, eoc } // convert a byte channel into a channel of pseudo-tokens -func tokenizeDhcpConfig(eof sentinelSignaller, in chan byte) chan string { +func tokenizeDhcpConfig(eof sentinelSignaller, in chan byte) (chan string, sentinelSignaller) { var ch byte var state string var quote bool + eot := make(sentinelSignaller) + out := make(chan string) go func(out chan string) { for stillReading := true; stillReading; { @@ -106,8 +110,9 @@ func tokenizeDhcpConfig(eof sentinelSignaller, in chan byte) chan string { if len(state) 
> 0 { out <- state } + close(eot) }(out) - return out + return out, eot } /** mid-level parsing */ @@ -220,12 +225,14 @@ func parseDhcpConfig(eof sentinelSignaller, in chan string) (tkGroup, error) { return result, nil } -func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) chan string { +func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) (chan string, sentinelSignaller) { var ch byte var state string var quote bool var lastnewline bool + eot := make(sentinelSignaller) + out := make(chan string) go func(out chan string) { for stillReading := true; stillReading; { @@ -291,8 +298,9 @@ func tokenizeNetworkMapConfig(eof sentinelSignaller, in chan byte) chan string { if len(state) > 0 { out <- state } + close(eot) }(out) - return out + return out, eot } func parseNetworkMapConfig(eof sentinelSignaller, in chan string) (NetworkMap, error) { @@ -966,9 +974,9 @@ type DhcpConfiguration []configDeclaration func ReadDhcpConfiguration(fd *os.File) (DhcpConfiguration, error) { fromfile, eof := consumeFile(fd) - uncommented := uncomment(eof, fromfile) - tokenized := tokenizeDhcpConfig(eof, uncommented) - parsetree, err := parseDhcpConfig(eof, tokenized) + uncommented, eoc := uncomment(eof, fromfile) + tokenized, eot := tokenizeDhcpConfig(eoc, uncommented) + parsetree, err := parseDhcpConfig(eot, tokenized) if err != nil { return nil, err } @@ -1064,10 +1072,10 @@ type NetworkNameMapper interface { func ReadNetworkMap(fd *os.File) (NetworkMap, error) { fromfile, eof := consumeFile(fd) - uncommented := uncomment(eof, fromfile) - tokenized := tokenizeNetworkMapConfig(eof, uncommented) + uncommented, eoc := uncomment(eof, fromfile) + tokenized, eot := tokenizeNetworkMapConfig(eoc, uncommented) - result, err := parseNetworkMapConfig(eof, tokenized) + result, err := parseNetworkMapConfig(eot, tokenized) if err != nil { return nil, err } @@ -1100,11 +1108,13 @@ func (e *NetworkMap) repr() string { } /*** parser for VMware Fusion's networking file */ -func 
tokenizeNetworkingConfig(eof sentinelSignaller, in chan byte) chan string { +func tokenizeNetworkingConfig(eof sentinelSignaller, in chan byte) (chan string, sentinelSignaller) { var ch byte var state string var repeat_newline bool + eot := make(sentinelSignaller) + out := make(chan string) go func(out chan string) { for reading := true; reading; { @@ -1144,13 +1154,16 @@ func tokenizeNetworkingConfig(eof sentinelSignaller, in chan byte) chan string { if len(state) > 0 { out <- state } + close(eot) }(out) - return out + return out, eot } -func splitNetworkingConfig(eof sentinelSignaller, in chan string) chan []string { +func splitNetworkingConfig(eof sentinelSignaller, in chan string) (chan []string, sentinelSignaller) { var out chan []string + eos := make(sentinelSignaller) + out = make(chan []string) go func(out chan []string) { row := make([]string, 0) @@ -1174,8 +1187,9 @@ func splitNetworkingConfig(eof sentinelSignaller, in chan string) chan []string if len(row) > 0 { out <- row } + close(eos) }(out) - return out + return out, eos } /// All token types in networking file. 
@@ -1642,9 +1656,11 @@ func NetworkingParserByCommand(command string) *func([]string) (*networkingComma return nil } -func parseNetworkingConfig(eof sentinelSignaller, rows chan []string) chan networkingCommandEntry { +func parseNetworkingConfig(eof sentinelSignaller, rows chan []string) (chan networkingCommandEntry, sentinelSignaller) { var out chan networkingCommandEntry + eop := make(sentinelSignaller) + out = make(chan networkingCommandEntry) go func(in chan []string, out chan networkingCommandEntry) { for reading := true; reading; { @@ -1668,8 +1684,9 @@ func parseNetworkingConfig(eof sentinelSignaller, rows chan []string) chan netwo } } } + close(eop) }(rows, out) - return out + return out, eop } type NetworkingConfig struct { @@ -1795,9 +1812,9 @@ func flattenNetworkingConfig(eof sentinelSignaller, in chan networkingCommandEnt func ReadNetworkingConfig(fd *os.File) (NetworkingConfig, error) { // start piecing together different parts of the file fromfile, eof := consumeFile(fd) - tokenized := tokenizeNetworkingConfig(eof, fromfile) - rows := splitNetworkingConfig(eof, tokenized) - entries := parseNetworkingConfig(eof, rows) + tokenized, eot := tokenizeNetworkingConfig(eof, fromfile) + rows, eos := splitNetworkingConfig(eot, tokenized) + entries, eop := parseNetworkingConfig(eos, rows) // parse the version parsed_version, err := networkingReadVersion(<-rows) @@ -1812,7 +1829,7 @@ func ReadNetworkingConfig(fd *os.File) (NetworkingConfig, error) { } // convert to a configuration - result := flattenNetworkingConfig(eof, entries) + result := flattenNetworkingConfig(eop, entries) return result, nil } From 74946071d255e74145240232aa9cc6770685c840 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Mon, 25 Dec 2017 14:08:08 -0600 Subject: [PATCH 175/251] Added support for specifying the disk adapter type to the vmware builders. This was squashed from the vmware-diskAdapterType branch (#2968) as submitted by Rami Abughazaleh . 
This closes #5671 and possibly #4885. arizvisa: Updated icnocop's documentation to include the possible disk adapter types that one can specify. arizvisa: Tweaked icnocop's support for the `disk_adapter_type` option to the VMWare builder that caused conflicts due to version skew. icnocop: Updated links to the Virtual Disk Manager User's Guide PDF to open in a new window and also added the Adobe PDF icon icnocop: Added support for vmware to specify the disk adapter type, ide or scsi (lsilogic or buslogic) --- builder/vmware/common/driver.go | 2 +- builder/vmware/common/driver_fusion5.go | 4 +- builder/vmware/common/driver_mock.go | 14 +-- builder/vmware/common/driver_player5.go | 4 +- builder/vmware/common/driver_workstation9.go | 4 +- builder/vmware/iso/builder.go | 6 ++ builder/vmware/iso/driver_esx5.go | 4 +- builder/vmware/iso/step_create_disk.go | 4 +- builder/vmware/iso/step_create_vmx.go | 87 +++++++++++++++--- .../images/Adobe_PDF_file_icon_24x24.png | Bin 0 -> 1288 bytes .../source/docs/builders/vmware-iso.html.md | 11 ++- 11 files changed, 108 insertions(+), 32 deletions(-) create mode 100644 website/source/assets/images/Adobe_PDF_file_icon_24x24.png diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 6b5cfde1f..8fa185efd 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -29,7 +29,7 @@ type Driver interface { CompactDisk(string) error // CreateDisk creates a virtual disk with the given size. - CreateDisk(string, string, string) error + CreateDisk(string, string, string, string) error // Checks if the VMX file at the given path is running. 
IsRunning(string) (bool, error) diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index e3c0f061e..9c81efd4a 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -41,8 +41,8 @@ func (d *Fusion5Driver) CompactDisk(diskPath string) error { return nil } -func (d *Fusion5Driver) CreateDisk(output string, size string, type_id string) error { - cmd := exec.Command(d.vdiskManagerPath(), "-c", "-s", size, "-a", "lsilogic", "-t", type_id, output) +func (d *Fusion5Driver) CreateDisk(output string, size string, adapter_type string, type_id string) error { + cmd := exec.Command(d.vdiskManagerPath(), "-c", "-s", size, "-a", adapter_type, "-t", type_id, output) if _, _, err := runAndLog(cmd); err != nil { return err } diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index f857f4adc..a0955db88 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -19,11 +19,12 @@ type DriverMock struct { CompactDiskPath string CompactDiskErr error - CreateDiskCalled bool - CreateDiskOutput string - CreateDiskSize string - CreateDiskTypeId string - CreateDiskErr error + CreateDiskCalled bool + CreateDiskOutput string + CreateDiskSize string + CreateDiskAdapterType string + CreateDiskTypeId string + CreateDiskErr error IsRunningCalled bool IsRunningPath string @@ -119,10 +120,11 @@ func (d *DriverMock) CompactDisk(path string) error { return d.CompactDiskErr } -func (d *DriverMock) CreateDisk(output string, size string, typeId string) error { +func (d *DriverMock) CreateDisk(output string, size string, adapterType string, typeId string) error { d.CreateDiskCalled = true d.CreateDiskOutput = output d.CreateDiskSize = size + d.CreateDiskAdapterType = adapterType d.CreateDiskTypeId = typeId return d.CreateDiskErr } diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 
2814cbad3..fb14dcb52 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -64,12 +64,12 @@ func (d *Player5Driver) qemuCompactDisk(diskPath string) error { return nil } -func (d *Player5Driver) CreateDisk(output string, size string, type_id string) error { +func (d *Player5Driver) CreateDisk(output string, size string, adapter_type string, type_id string) error { var cmd *exec.Cmd if d.QemuImgPath != "" { cmd = exec.Command(d.QemuImgPath, "create", "-f", "vmdk", "-o", "compat6", output, size) } else { - cmd = exec.Command(d.VdiskManagerPath, "-c", "-s", size, "-a", "lsilogic", "-t", type_id, output) + cmd = exec.Command(d.VdiskManagerPath, "-c", "-s", size, "-a", adapter_type, "-t", type_id, output) } if _, _, err := runAndLog(cmd); err != nil { return err diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index c2c905922..25b4ddd2c 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -42,8 +42,8 @@ func (d *Workstation9Driver) CompactDisk(diskPath string) error { return nil } -func (d *Workstation9Driver) CreateDisk(output string, size string, type_id string) error { - cmd := exec.Command(d.VdiskManagerPath, "-c", "-s", size, "-a", "lsilogic", "-t", type_id, output) +func (d *Workstation9Driver) CreateDisk(output string, size string, adapter_type string, type_id string) error { + cmd := exec.Command(d.VdiskManagerPath, "-c", "-s", size, "-a", adapter_type, "-t", type_id, output) if _, _, err := runAndLog(cmd); err != nil { return err } diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 2dcc082ac..5e2908964 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -40,6 +40,7 @@ type Config struct { // disk drives AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskAdapterType string `mapstructure:"disk_adapter_type"` DiskName string 
`mapstructure:"vmdk_name"` DiskSize uint `mapstructure:"disk_size"` DiskTypeId string `mapstructure:"disk_type_id"` @@ -126,6 +127,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.DiskSize = 40000 } + if b.config.DiskAdapterType == "" { + // Default is lsilogic + b.config.DiskAdapterType = "lsilogic" + } + if b.config.DiskTypeId == "" { // Default is growable virtual disk split in 2GB files. b.config.DiskTypeId = "1" diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 3f18656d0..40cc35eb1 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -50,9 +50,9 @@ func (d *ESX5Driver) CompactDisk(diskPathLocal string) error { return nil } -func (d *ESX5Driver) CreateDisk(diskPathLocal string, size string, typeId string) error { +func (d *ESX5Driver) CreateDisk(diskPathLocal string, size string, adapter_type string, typeId string) error { diskPath := d.datastorePath(diskPathLocal) - return d.sh("vmkfstools", "-c", size, "-d", typeId, "-a", "lsilogic", diskPath) + return d.sh("vmkfstools", "-c", size, "-d", typeId, "-a", adapter_type, diskPath) } func (d *ESX5Driver) IsRunning(string) (bool, error) { diff --git a/builder/vmware/iso/step_create_disk.go b/builder/vmware/iso/step_create_disk.go index ddee55c7f..788de2ddc 100644 --- a/builder/vmware/iso/step_create_disk.go +++ b/builder/vmware/iso/step_create_disk.go @@ -28,7 +28,7 @@ func (stepCreateDisk) Run(_ context.Context, state multistep.StateBag) multistep ui.Say("Creating virtual machine disk") full_disk_path := filepath.Join(config.OutputDir, config.DiskName+".vmdk") - if err := driver.CreateDisk(full_disk_path, fmt.Sprintf("%dM", config.DiskSize), config.DiskTypeId); err != nil { + if err := driver.CreateDisk(full_disk_path, fmt.Sprintf("%dM", config.DiskSize), config.DiskAdapterType, config.DiskTypeId); err != nil { err := fmt.Errorf("Error creating disk: %s", err) state.Put("error", err) ui.Error(err.Error()) @@ 
-46,7 +46,7 @@ func (stepCreateDisk) Run(_ context.Context, state multistep.StateBag) multistep additionalpath := filepath.Join(config.OutputDir, fmt.Sprintf("%s-%d.vmdk", config.DiskName, i+1)) size := fmt.Sprintf("%dM", uint64(additionalsize)) - if err := driver.CreateDisk(additionalpath, size, config.DiskTypeId); err != nil { + if err := driver.CreateDisk(additionalpath, size, config.DiskAdapterType, config.DiskTypeId); err != nil { err := fmt.Errorf("Error creating additional disk: %s", err) state.Put("error", err) ui.Error(err.Error()) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 7728260ad..4c3eb58ae 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -16,11 +16,20 @@ import ( ) type vmxTemplateData struct { - Name string - GuestOS string - DiskName string - ISOPath string - Version string + Name string + GuestOS string + ISOPath string + Version string + + SCSI_Present string + SCSI_diskAdapterType string + SATA_Present string + NVME_Present string + + DiskName string + DiskType string + CDROMType string + CDROMType_MasterSlave string Network_Type string Network_Device string @@ -287,6 +296,11 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) + // Convert the iso_path into a path relative to the .vmx file if possible + if relativeIsoPath, err := filepath.Rel(config.VMXTemplatePath, filepath.FromSlash(isoPath)); err == nil { + isoPath = relativeIsoPath + } + ui.Say("Building and writing VMX file") vmxTemplate := DefaultVMXTemplate @@ -361,6 +375,15 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist Version: config.Version, ISOPath: isoPath, + SCSI_Present: "FALSE", + SCSI_diskAdapterType: "lsilogic", + SATA_Present: "FALSE", + NVME_Present: "FALSE", + + DiskType: "scsi", + CDROMType: "ide", + CDROMType_MasterSlave: "0", + 
Sound_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.Sound)], Usb_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.USB)], @@ -368,6 +391,36 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist Parallel_Present: "FALSE", } + /// Use the disk adapter type that the user specified to tweak the .vmx + // Also sync the cdrom adapter type according to what's common for that disk type. + diskAdapterType := strings.ToLower(config.DiskAdapterType) + switch diskAdapterType { + case "ide": + templateData.DiskType = "ide" + templateData.CDROMType = "ide" + templateData.CDROMType_MasterSlave = "1" + case "sata": + templateData.SATA_Present = "TRUE" + templateData.DiskType = "sata" + templateData.CDROMType = "sata" + templateData.CDROMType_MasterSlave = "1" + case "nvme": + templateData.NVME_Present = "TRUE" + templateData.DiskType = "nvme" + templateData.SATA_Present = "TRUE" + templateData.CDROMType = "sata" + templateData.CDROMType_MasterSlave = "0" + case "scsi": + diskAdapterType = "lsilogic" + fallthrough + default: + templateData.SCSI_Present = "TRUE" + templateData.SCSI_diskAdapterType = diskAdapterType + templateData.DiskType = "scsi" + templateData.CDROMType = "ide" + templateData.CDROMType_MasterSlave = "0" + } + /// Check the network type that the user specified network := config.Network driver := state.Get("driver").(vmwcommon.Driver).GetVmwareDriver() @@ -556,9 +609,21 @@ gui.fullScreenAtPowerOn = "FALSE" gui.viewModeAtPowerOn = "windowed" hgfs.linkRootShare = "TRUE" hgfs.mapRootShare = "TRUE" -ide1:0.present = "TRUE" -ide1:0.fileName = "{{ .ISOPath }}" -ide1:0.deviceType = "cdrom-image" + +scsi0.present = "{{ .SCSI_Present }}" +scsi0.virtualDev = "{{ .SCSI_diskAdapterType }}" +scsi0.pciSlotNumber = "16" +scsi0:0.redo = "" +sata0.present = "{{ .SATA_Present }}" +nvme0.present = "{{ .NVME_Present }}" + +{{ .DiskType }}0:0.present = "TRUE" +{{ .DiskType }}0:0.fileName = "{{ .DiskName }}.vmdk" + +{{ 
.CDROMType }}0:{{ .CDROMType_MasterSlave }}.present = "TRUE" +{{ .CDROMType }}0:{{ .CDROMType_MasterSlave }}.fileName = "{{ .ISOPath }}" +{{ .CDROMType }}0:{{ .CDROMType_MasterSlave }}.deviceType = "cdrom-image" + isolation.tools.hgfs.disable = "FALSE" memsize = "512" nvram = "{{ .Name }}.nvram" @@ -587,12 +652,6 @@ powerType.suspend = "soft" proxyApps.publishToHost = "FALSE" replay.filename = "" replay.supported = "FALSE" -scsi0.pciSlotNumber = "16" -scsi0.present = "TRUE" -scsi0.virtualDev = "lsilogic" -scsi0:0.fileName = "{{ .DiskName }}.vmdk" -scsi0:0.present = "TRUE" -scsi0:0.redo = "" // Sound sound.startConnected = "{{ .Sound_Present }}" diff --git a/website/source/assets/images/Adobe_PDF_file_icon_24x24.png b/website/source/assets/images/Adobe_PDF_file_icon_24x24.png new file mode 100644 index 0000000000000000000000000000000000000000..a722e5b334140fcb2a527b44465dbf4ca6e05cd4 GIT binary patch literal 1288 zcmV+j1^4=iP)5)P}~C2Cc=S;&*9j zNffFDZ6Bl*O8Znwp|lTus1LpfT3TAc4+vN&eGoJ+l82T;tCmzu;tY<)iN>)rCNpzy z?#DT2@70HM?|8@Qh%VUd^Rf3@>%Z3jzt53NSL0Cy0PoSI+=?H3(~W=j@O6MQGXp?C zL{L#w6$1oA8%?3s403tvALrISi1YLFGTpPVuz)aQ=Qn#0BNnYLiTeQ2x`hY=>Rc7a zKvfX|)Zy|GHXN{i@#61#TU)<2v-Nhnt&K(_0?4HW`J1SMfR%=AMl0rA&3)<6VfsCh zMibOy{Y`KVZM7>n-Xj;+2;ch=@qwSj!8_+t@yT=R>+APzY;0WacDqhQf{*HMbged& z1v=lx-}V*U(i!CKW&Dw^QJ#Jk20i?BUndTSl&}09VuUP(^FxXoR=^3(dc8;*&2Opd z)7@_O`~7~um!_#`QI=u2xfurU_CmSvg`hwEZHV7^FqGHdg#7$bikt7Hxc(O8hreKy zwHTT(ECa(lFcK&;K@7&@JP9F~nH^nQTYEOo^Uw7AebZpLD7dJ&n2#Lep)1dyb5WJS zA09(H9jIpz0#Q6Mltdt6L<5TvRVAxs#2BT~Xp9dWIB<8CWe;_`U9>$KEU|& zOKd*=1Z5GZ9s4dN5M0W5=?wi7f29;hVNgWK4FaG7S(=fiDQTK&qtRe67|a3q(x{kI zgVEAyij56o3x=S&MqVKNU^$_^ndv)+!X?%*xTu&WlcM-)A2^{_G1bt0h*JhzO#B#E3SUkfi9Kj|ep9uf+9wNRm`KnK<4K z@UJn;6TnqC;qPQ zP(Jxb;;Dbb$_n=Wd&s2jyI8cR(+Nzoh-7svA5lK@1mWyyl4JKHJ|lnfPsqOggg2IH zJ^p*7-9bXwfx_>FKvhu@%%{Y~GlUmUP@Z^>+HW4geeUz zE{QS5k884c@4a(QcQDQE*|OOrZQ{%A3b44iNC<%o7cPuXo;>-N@p#;sCi+P&#u)4M ydTVuc^*_7*DrT$(w1Fm675Ee;KnZLD{r>^{G#E%4x}j_U0000 Virtual Disk Manager User's Guide for 
desktop VMware clients. + For ESXi, refer to the proper ESXi documentation. + +- `disable_vnc` (boolean) - Whether to create a VNC connection or not. A `boot_command` cannot be used when this is `false`. Defaults to `false`. - `floppy_files` (array of strings) - A list of files to place onto a floppy From eb0445ca961eac0c40c6ae0a631936fffa15247c Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 16 Jan 2018 13:17:37 -0600 Subject: [PATCH 176/251] Added support for specifying both the network adapter type and cdrom adapter type as requested by @night199uk. Also included the respective documentation for these new options. --- builder/vmware/iso/builder.go | 8 +++- builder/vmware/iso/step_create_vmx.go | 41 +++++++++++++++++-- .../source/docs/builders/vmware-iso.html.md | 12 ++++++ 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 5e2908964..cc5a9673b 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -46,13 +46,17 @@ type Config struct { DiskTypeId string `mapstructure:"disk_type_id"` Format string `mapstructure:"format"` + // cdrom drive + CdromAdapterType string `mapstructure:"cdrom_adapter_type"` + // platform information GuestOSType string `mapstructure:"guest_os_type"` Version string `mapstructure:"version"` VMName string `mapstructure:"vm_name"` - // Network type - Network string `mapstructure:"network"` + // Network adapter and type + NetworkAdapterType string `mapstructure:"network_adapter_type"` + Network string `mapstructure:"network"` // device presence Sound bool `mapstructure:"sound"` diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 4c3eb58ae..0eb753dc8 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -31,8 +31,9 @@ type vmxTemplateData struct { CDROMType string CDROMType_MasterSlave string - Network_Type string - Network_Device string + 
Network_Type string + Network_Device string + Network_Adapter string Sound_Present string Usb_Present string @@ -384,6 +385,8 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist CDROMType: "ide", CDROMType_MasterSlave: "0", + Network_Adapter: "e1000", + Sound_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.Sound)], Usb_Present: map[bool]string{true: "TRUE", false: "FALSE"}[bool(config.USB)], @@ -421,6 +424,38 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist templateData.CDROMType_MasterSlave = "0" } + /// Handle the cdrom adapter type. If the disk adapter type and the + // cdrom adapter type are the same, then ensure that the cdrom is the + // slave device on whatever bus the disk adapter is on. + cdromAdapterType := strings.ToLower(config.CdromAdapterType) + if cdromAdapterType == diskAdapterType { + templateData.CDROMType_MasterSlave = "1" + } else { + templateData.CDROMType_MasterSlave = "0" + } + + switch cdromAdapterType { + case "ide": + templateData.CDROMType = "ide" + case "sata": + templateData.SATA_Present = "TRUE" + templateData.CDROMType = "sata" + case "scsi": + templateData.SCSI_Present = "TRUE" + templateData.CDROMType = "scsi" + default: + err := fmt.Errorf("Error procesing VMX template: %s", cdromAdapterType) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + /// Assign the network adapter type into the template if one was specified. 
+ network_adapter := strings.ToLower(config.NetworkAdapterType) + if network_adapter != "" { + templateData.Network_Adapter = network_adapter + } + /// Check the network type that the user specified network := config.Network driver := state.Get("driver").(vmwcommon.Driver).GetVmwareDriver() @@ -600,7 +635,7 @@ ethernet0.displayName = "Ethernet" ethernet0.linkStatePropagation.enable = "FALSE" ethernet0.pciSlotNumber = "33" ethernet0.present = "TRUE" -ethernet0.virtualDev = "e1000" +ethernet0.virtualDev = "{{ .Network_Adapter }}" ethernet0.wakeOnPcktRcv = "FALSE" extendedConfigFile = "{{ .Name }}.vmxf" floppy0.present = "FALSE" diff --git a/website/source/docs/builders/vmware-iso.html.md b/website/source/docs/builders/vmware-iso.html.md index 0efe03a0f..2f4e3e44f 100644 --- a/website/source/docs/builders/vmware-iso.html.md +++ b/website/source/docs/builders/vmware-iso.html.md @@ -140,6 +140,12 @@ builder. Virtual Disk Manager User's Guide for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. +- `cdrom_adapter_type` (string) - The adapter type (or bus) that will be used + by the cdrom device. This is chosen by default based on the disk adapter + type. VMware tends to lean towards "ide" for the cdrom device unless + "sata" is chosen for the disk adapter and so Packer attempts to mirror + this logic. This field can be specified as either "ide", "sata", or "scsi". + - `disable_vnc` (boolean) - Whether to create a VNC connection or not. A `boot_command` cannot be used when this is `false`. Defaults to `false`. @@ -207,6 +213,12 @@ builder. such as "hostonly", "nat", or "bridged". If the network is not one of these values, then it is assumed to be a VMware network device. (VMnet0..x) +- `network_adapter_type` (string) - This is the ethernet adapter type the the + virtual machine will be created with. By default the "e1000" network adapter + type will be used by Packer. 
For more information, please consult the + Choosing a network adapter for your virtual machine for desktop VMware + clients. For ESXi, refer to the proper ESXi documentation. + - `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` From aefe41a44ad0631d2095a6d99464f080e4f28533 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Tue, 16 Jan 2018 13:27:21 -0600 Subject: [PATCH 177/251] Fixed an issue with the previous commit so that when the user does not specify the cdrom_adapter_type to fallback to the original decision made by the disk adapter type selection. --- builder/vmware/iso/step_create_vmx.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 0eb753dc8..ffff9d999 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -428,7 +428,9 @@ func (s *stepCreateVMX) Run(_ context.Context, state multistep.StateBag) multist // cdrom adapter type are the same, then ensure that the cdrom is the // slave device on whatever bus the disk adapter is on. cdromAdapterType := strings.ToLower(config.CdromAdapterType) - if cdromAdapterType == diskAdapterType { + if cdromAdapterType == "" { + cdromAdapterType = templateData.CDROMType + } else if cdromAdapterType == diskAdapterType { templateData.CDROMType_MasterSlave = "1" } else { templateData.CDROMType_MasterSlave = "0" From fa2dddd26d1584f952f6d1bf71db14415d43bd70 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 19:45:18 -0600 Subject: [PATCH 178/251] Fixed some things mucked up during rebase. 
--- builder/vmware/common/step_type_boot_command.go | 1 - builder/vmware/iso/driver_esx5.go | 1 - 2 files changed, 2 deletions(-) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index c169c59eb..02404af91 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -7,7 +7,6 @@ import ( "net" "os" "regexp" - "runtime" "strings" "time" "unicode" diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 40cc35eb1..1dc352999 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -19,7 +19,6 @@ import ( "github.com/hashicorp/packer/communicator/ssh" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" gossh "golang.org/x/crypto/ssh" ) From c366a1e1607aae9ca84bb8a7acc65808f81b37d1 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 20:16:54 -0600 Subject: [PATCH 179/251] Inverted the logic of FileExistsLocally as suggested by @SwampDragons as remote URLs are assumed to exist locally. --- common/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/config.go b/common/config.go index 269d52dd1..aa632ceae 100644 --- a/common/config.go +++ b/common/config.go @@ -195,7 +195,7 @@ func FileExistsLocally(original string) bool { // Check to see that it's got a Local way of doing things. local, ok := d.(LocalDownloader) if !ok { - return false + return true // XXX: Remote URLs short-circuit this logic. } // Figure out where we're at. From efc97dbda2fedd12cb8825db3cdaff2017159654 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 20:29:10 -0600 Subject: [PATCH 180/251] Fixed TestFileExistsLocally tests in common/config_test.go so that they're actually being run. Added a non-existent-protocol:// test. 
--- common/config_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/common/config_test.go b/common/config_test.go index 35697291d..29574ee4e 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -285,7 +285,7 @@ func TestDownloadableURL_FilePaths(t *testing.T) { } } -func test_FileExistsLocally(t *testing.T) { +func TestFileExistsLocally(t *testing.T) { portablepath := GetPortablePathToTestFixtures(t) dirCases := []struct { @@ -294,15 +294,17 @@ func test_FileExistsLocally(t *testing.T) { }{ // file exists locally {fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), true}, - // file is not supposed to exist locally + // remote protocols short-circuit and are considered to exist locally {"https://myfile.iso", true}, + // non-existent protocols do not exist and hence fail + {"nonexistent-protocol://myfile.iso", false}, // file does not exist locally {"file:///C/i/dont/exist", false}, } // Run through test cases to make sure they all parse correctly for _, tc := range dirCases { fileOK := FileExistsLocally(tc.Input) - if !fileOK { + if fileOK != tc.Output { t.Fatalf("Test Case failed: Expected %#v, received = %#v, input = %s", tc.Output, fileOK, tc.Input) } From d4b00b722add485cf9472bf47efa61a4854922e4 Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 20:36:08 -0600 Subject: [PATCH 181/251] Removed an extra '/' from the TestFileExistsLocally test in common/config_test.go --- common/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/config_test.go b/common/config_test.go index 29574ee4e..8f170fe1d 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -293,7 +293,7 @@ func TestFileExistsLocally(t *testing.T) { Output bool }{ // file exists locally - {fmt.Sprintf("file:///%s/SomeDir/myfile.txt", portablepath), true}, + {fmt.Sprintf("file://%s/SomeDir/myfile.txt", portablepath), true}, // remote protocols short-circuit and are considered to exist 
locally {"https://myfile.iso", true}, // non-existent protocols do not exist and hence fail From 9eb2f3742941c2370864b445dad0b863efda936b Mon Sep 17 00:00:00 2001 From: Ali Rizvi-Santiago Date: Fri, 2 Feb 2018 20:44:22 -0600 Subject: [PATCH 182/251] Ack! Forgot to include the test-fixtures/SomeDir/myfile.txt file... --- common/test-fixtures/SomeDir/myfile.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 common/test-fixtures/SomeDir/myfile.txt diff --git a/common/test-fixtures/SomeDir/myfile.txt b/common/test-fixtures/SomeDir/myfile.txt new file mode 100644 index 000000000..e69de29bb From eaef2961cb40b214cb744214998a02097053c00f Mon Sep 17 00:00:00 2001 From: Petr Hosek Date: Mon, 18 Dec 2017 12:51:39 -0800 Subject: [PATCH 183/251] Support specifying licenses for Google Compute images This is needed to enable features such as the nested virtualization: https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances --- builder/googlecompute/config.go | 1 + builder/googlecompute/config_test.go | 3 +++ builder/googlecompute/driver.go | 2 +- builder/googlecompute/driver_gce.go | 3 ++- builder/googlecompute/driver_mock.go | 7 ++++--- builder/googlecompute/step_create_image.go | 2 +- builder/googlecompute/step_create_image_test.go | 3 +-- 7 files changed, 13 insertions(+), 8 deletions(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 8ed3897d6..c8863d4d9 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -36,6 +36,7 @@ type Config struct { ImageDescription string `mapstructure:"image_description"` ImageFamily string `mapstructure:"image_family"` ImageLabels map[string]string `mapstructure:"image_labels"` + ImageLicenses []string `mapstructure:"image_licenses"` InstanceName string `mapstructure:"instance_name"` Labels map[string]string `mapstructure:"labels"` MachineType string `mapstructure:"machine_type"` diff --git 
a/builder/googlecompute/config_test.go b/builder/googlecompute/config_test.go index 551692b29..a4b862a1a 100644 --- a/builder/googlecompute/config_test.go +++ b/builder/googlecompute/config_test.go @@ -309,6 +309,9 @@ func testConfig(t *testing.T) map[string]interface{} { "label-1": "value-1", "label-2": "value-2", }, + "image_licenses": []string{ + "test-license", + }, "zone": "us-east1-a", } } diff --git a/builder/googlecompute/driver.go b/builder/googlecompute/driver.go index bd302ddc0..fdb7d9442 100644 --- a/builder/googlecompute/driver.go +++ b/builder/googlecompute/driver.go @@ -11,7 +11,7 @@ import ( type Driver interface { // CreateImage creates an image from the given disk in Google Compute // Engine. - CreateImage(name, description, family, zone, disk string, image_labels map[string]string) (<-chan *Image, <-chan error) + CreateImage(name, description, family, zone, disk string, image_labels map[string]string, image_licenses []string) (<-chan *Image, <-chan error) // DeleteImage deletes the image with the given name. 
DeleteImage(name string) <-chan error diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go index 44dcb85c6..c84ab222f 100644 --- a/builder/googlecompute/driver_gce.go +++ b/builder/googlecompute/driver_gce.go @@ -97,12 +97,13 @@ func NewDriverGCE(ui packer.Ui, p string, a *AccountFile) (Driver, error) { }, nil } -func (d *driverGCE) CreateImage(name, description, family, zone, disk string, image_labels map[string]string) (<-chan *Image, <-chan error) { +func (d *driverGCE) CreateImage(name, description, family, zone, disk string, image_labels map[string]string, image_licenses []string) (<-chan *Image, <-chan error) { gce_image := &compute.Image{ Description: description, Name: name, Family: family, Labels: image_labels, + Licenses: image_licenses, SourceDisk: fmt.Sprintf("%s%s/zones/%s/disks/%s", d.service.BasePath, d.projectId, zone, disk), SourceType: "RAW", } diff --git a/builder/googlecompute/driver_mock.go b/builder/googlecompute/driver_mock.go index 895775da5..720f9d60b 100644 --- a/builder/googlecompute/driver_mock.go +++ b/builder/googlecompute/driver_mock.go @@ -9,9 +9,9 @@ type DriverMock struct { CreateImageDesc string CreateImageFamily string CreateImageLabels map[string]string + CreateImageLicenses []string CreateImageZone string CreateImageDisk string - CreateImageResultLicenses []string CreateImageResultProjectId string CreateImageResultSelfLink string CreateImageResultSizeGb int64 @@ -82,11 +82,12 @@ type DriverMock struct { WaitForInstanceErrCh <-chan error } -func (d *DriverMock) CreateImage(name, description, family, zone, disk string, image_labels map[string]string) (<-chan *Image, <-chan error) { +func (d *DriverMock) CreateImage(name, description, family, zone, disk string, image_labels map[string]string, image_licenses []string) (<-chan *Image, <-chan error) { d.CreateImageName = name d.CreateImageDesc = description d.CreateImageFamily = family d.CreateImageLabels = image_labels + d.CreateImageLicenses = 
image_licenses d.CreateImageZone = zone d.CreateImageDisk = disk if d.CreateImageResultProjectId == "" { @@ -106,7 +107,7 @@ func (d *DriverMock) CreateImage(name, description, family, zone, disk string, i ch := make(chan *Image, 1) ch <- &Image{ Labels: d.CreateImageLabels, - Licenses: d.CreateImageResultLicenses, + Licenses: d.CreateImageLicenses, Name: name, ProjectId: d.CreateImageResultProjectId, SelfLink: d.CreateImageResultSelfLink, diff --git a/builder/googlecompute/step_create_image.go b/builder/googlecompute/step_create_image.go index 72e6a06eb..c2bde16c8 100644 --- a/builder/googlecompute/step_create_image.go +++ b/builder/googlecompute/step_create_image.go @@ -40,7 +40,7 @@ func (s *StepCreateImage) Run(_ context.Context, state multistep.StateBag) multi imageCh, errCh := driver.CreateImage( config.ImageName, config.ImageDescription, config.ImageFamily, config.Zone, - config.DiskName, config.ImageLabels) + config.DiskName, config.ImageLabels, config.ImageLicenses) var err error select { case err = <-errCh: diff --git a/builder/googlecompute/step_create_image_test.go b/builder/googlecompute/step_create_image_test.go index 6f1121a9f..65ccca313 100644 --- a/builder/googlecompute/step_create_image_test.go +++ b/builder/googlecompute/step_create_image_test.go @@ -22,7 +22,6 @@ func TestStepCreateImage(t *testing.T) { d := state.Get("driver").(*DriverMock) // These are the values of the image the driver will return. - d.CreateImageResultLicenses = []string{"test-license"} d.CreateImageResultProjectId = "test-project" d.CreateImageResultSizeGb = 100 @@ -36,7 +35,6 @@ func TestStepCreateImage(t *testing.T) { assert.True(t, ok, "Image in state is not an Image.") // Verify created Image results. 
- assert.Equal(t, image.Licenses, d.CreateImageResultLicenses, "Created image licenses don't match the licenses returned by the driver.") assert.Equal(t, image.Name, c.ImageName, "Created image does not match config name.") assert.Equal(t, image.ProjectId, d.CreateImageResultProjectId, "Created image project does not match driver project.") assert.Equal(t, image.SizeGb, d.CreateImageResultSizeGb, "Created image size does not match the size returned by the driver.") @@ -48,6 +46,7 @@ func TestStepCreateImage(t *testing.T) { assert.Equal(t, d.CreateImageZone, c.Zone, "Incorrect image zone passed to driver.") assert.Equal(t, d.CreateImageDisk, c.DiskName, "Incorrect disk passed to driver.") assert.Equal(t, d.CreateImageLabels, c.ImageLabels, "Incorrect image_labels passed to driver.") + assert.Equal(t, d.CreateImageLicenses, c.ImageLicenses, "Incorrect image_licenses passed to driver.") } func TestStepCreateImage_errorOnChannel(t *testing.T) { From 1c9f18603c475ce83d8649200c0b57dd6855c771 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sat, 3 Feb 2018 15:32:10 -0600 Subject: [PATCH 184/251] Fix Google Compute Builder Documentation Added required configuration option ssh_username to basic config example. --- website/source/docs/builders/googlecompute.html.md | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/builders/googlecompute.html.md b/website/source/docs/builders/googlecompute.html.md index 1c4717768..e8147e966 100644 --- a/website/source/docs/builders/googlecompute.html.md +++ b/website/source/docs/builders/googlecompute.html.md @@ -109,6 +109,7 @@ is assumed to be the path to the file containing the JSON. 
"account_file": "account.json", "project_id": "my project", "source_image": "debian-7-wheezy-v20150127", + "ssh_username": "packer", "zone": "us-central1-a" } ] From c75db88f6b8e7ff1b8a0c7cffebc77c9ae02a0f2 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sat, 3 Feb 2018 15:56:29 -0600 Subject: [PATCH 185/251] Add documentation for new googlecompute builder image_licenses configuration option --- .../docs/builders/googlecompute.html.md | 63 ++++++++++++++----- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/website/source/docs/builders/googlecompute.html.md b/website/source/docs/builders/googlecompute.html.md index e8147e966..d9be8cd28 100644 --- a/website/source/docs/builders/googlecompute.html.md +++ b/website/source/docs/builders/googlecompute.html.md @@ -93,7 +93,9 @@ Packer looks for credentials in the following places, preferring the first locat 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches credentials from the metadata server. (Needs a correct VM authentication scope configuration, see above) -## Basic Example +## Examples + +### Basic Example Below is a fully functioning example. It doesn't do anything useful, since no provisioners or startup-script metadata are defined, but it will effectively @@ -123,25 +125,50 @@ user used to connect in a startup-script. 
``` {.json} { - "builders": [{ - "type": "googlecompute", - "account_file": "account.json", - "project_id": "my project", - "source_image": "windows-server-2016-dc-v20170227", - "disk_size": "50", - "machine_type": "n1-standard-1", - "communicator": "winrm", - "winrm_username": "packer_user", - "winrm_insecure": true, - "winrm_use_ssl": true, - "metadata": { - "windows-startup-script-cmd": "winrm quickconfig -quiet & net user /add packer_user & net localgroup administrators packer_user /add & winrm set winrm/config/service/auth @{Basic=\"true\"}" - }, - "zone": "us-central1-a" - }] + "builders": [ + { + "type": "googlecompute", + "account_file": "account.json", + "project_id": "my project", + "source_image": "windows-server-2016-dc-v20170227", + "disk_size": "50", + "machine_type": "n1-standard-1", + "communicator": "winrm", + "winrm_username": "packer_user", + "winrm_insecure": true, + "winrm_use_ssl": true, + "metadata": { + "windows-startup-script-cmd": "winrm quickconfig -quiet & net user /add packer_user & net localgroup administrators packer_user /add & winrm set winrm/config/service/auth @{Basic=\"true\"}" + }, + "zone": "us-central1-a" + } + ] } ``` +### Nested Hypervisor Example + +This is an example of using the `image_licenses` configuration option to create a GCE image that has nested virtualization enabled. See +[Enabling Nested Virtualization for VM Instances](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances) +for details. + +``` json +{ + "builders": [ + { + "type": "googlecompute", + "account_file": "account.json", + "project_id": "my project", + "source_image_family": "centos-7", + "ssh_username": "packer", + "zone": "us-central1-a", + "image_licenses": ["projects/vm-options/global/licenses/enable-vmx"] + } + ] +} + +``` + ## Configuration Reference Configuration options are organized below into two categories: required and @@ -202,6 +229,8 @@ builder. 
- `image_labels` (object of key/value strings) - Key/value pair labels to apply to the created image. +- `image_licenses` (array of strings) - Licenses to apply to the created image. + - `image_name` (string) - The unique name of the resulting image. Defaults to `"packer-{{timestamp}}"`. From 22deb5045dceb96afaca506299f4bf60dd92bd52 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sun, 4 Feb 2018 01:16:34 -0600 Subject: [PATCH 186/251] Remove extra new line from googlecompute docs --- website/source/docs/builders/googlecompute.html.md | 1 - 1 file changed, 1 deletion(-) diff --git a/website/source/docs/builders/googlecompute.html.md b/website/source/docs/builders/googlecompute.html.md index d9be8cd28..cda97b02c 100644 --- a/website/source/docs/builders/googlecompute.html.md +++ b/website/source/docs/builders/googlecompute.html.md @@ -166,7 +166,6 @@ for details. } ] } - ``` ## Configuration Reference From 8827df1ed2e534d0632bd388a2b131135ed0892e Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 5 Feb 2018 14:21:28 -0800 Subject: [PATCH 187/251] update go-aws-sdk to v1.12.71 --- vendor/github.com/aws/aws-sdk-go/CHANGELOG.md | 1119 ++++++++ .../github.com/aws/aws-sdk-go/CONTRIBUTING.md | 2 +- vendor/github.com/aws/aws-sdk-go/Gopkg.lock | 20 + vendor/github.com/aws/aws-sdk-go/Gopkg.toml | 48 + vendor/github.com/aws/aws-sdk-go/Makefile | 9 +- vendor/github.com/aws/aws-sdk-go/README.md | 6 +- .../aws-sdk-go/aws/client/default_retryer.go | 58 +- .../github.com/aws/aws-sdk-go/aws/config.go | 2 +- .../aws/aws-sdk-go/aws/defaults/defaults.go | 34 +- .../aws/aws-sdk-go/aws/endpoints/decode.go | 24 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 402 ++- .../aws/aws-sdk-go/aws/request/request.go | 146 +- .../aws/request/request_pagination.go | 25 +- .../aws/aws-sdk-go/aws/session/env_config.go | 5 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 18 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../github.com/aws/aws-sdk-go/buildspec.yml | 21 + 
vendor/github.com/aws/aws-sdk-go/doc.go | 2 +- .../private/protocol/json/jsonutil/build.go | 21 +- .../protocol/json/jsonutil/unmarshal.go | 15 +- .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../protocol/query/queryutil/queryutil.go | 4 + .../aws-sdk-go/private/protocol/rest/build.go | 21 +- .../private/protocol/rest/unmarshal.go | 14 +- .../aws/aws-sdk-go/service/ec2/api.go | 60 +- .../aws/aws-sdk-go/service/ec2/doc.go | 5 +- .../aws/aws-sdk-go/service/ecr/api.go | 1426 +++++++++- .../aws/aws-sdk-go/service/ecr/doc.go | 12 +- .../aws/aws-sdk-go/service/ecr/errors.go | 23 +- .../aws/aws-sdk-go/service/s3/api.go | 2340 ++++++++++++++--- .../aws/aws-sdk-go/service/s3/doc.go | 2 +- .../aws/aws-sdk-go/service/s3/doc_custom.go | 4 +- .../service/s3/s3iface/interface.go | 12 + .../aws-sdk-go/service/s3/s3manager/batch.go | 14 +- .../aws-sdk-go/service/s3/s3manager/upload.go | 10 +- .../aws/aws-sdk-go/service/sts/api.go | 62 +- .../aws/aws-sdk-go/service/sts/doc.go | 2 +- vendor/vendor.json | 304 +-- 38 files changed, 5557 insertions(+), 813 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/Gopkg.lock create mode 100644 vendor/github.com/aws/aws-sdk-go/Gopkg.toml create mode 100644 vendor/github.com/aws/aws-sdk-go/buildspec.yml create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index b52655b67..0fc7f9c88 100644 --- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,1122 @@ +Release v1.12.71 (2018-02-05) +=== + +### Service Client Updates +* `service/acm`: Updates service documentation + * Documentation updates for acm +* `service/cloud9`: Updates service documentation and examples + * API usage examples for AWS Cloud9. +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+* `service/kinesis`: Updates service API and documentation + * Using ListShards a Kinesis Data Streams customer or client can get information about shards in a data stream (including meta-data for each shard) without obtaining data stream level information. +* `service/opsworks`: Updates service API, documentation, and waiters + * AWS OpsWorks Stacks supports EBS encryption and HDD volume types. Also, a new DescribeOperatingSystems API is available, which lists all operating systems supported by OpsWorks Stacks. + +Release v1.12.70 (2018-01-26) +=== + +### Service Client Updates +* `service/devicefarm`: Updates service API and documentation + * Add InteractionMode in CreateRemoteAccessSession for DirectDeviceAccess feature. +* `service/medialive`: Updates service API and documentation + * Add InputSpecification to CreateChannel (specification of input attributes is used for channel sizing and affects pricing); add NotFoundException to DeleteInputSecurityGroups. +* `service/mturk-requester`: Updates service documentation + +Release v1.12.69 (2018-01-26) +=== + +### SDK Bugs +* `models/api`: Fix colliding names [#1754](https://github.com/aws/aws-sdk-go/pull/1754) [#1756](https://github.com/aws/aws-sdk-go/pull/1756) + * SDK had duplicate folders that were causing errors in some builds. + * Fixes [#1753](https://github.com/aws/aws-sdk-go/issues/1753) +Release v1.12.68 (2018-01-25) +=== + +### Service Client Updates +* `service/alexaforbusiness`: Updates service API and documentation +* `service/codebuild`: Updates service API and documentation + * Adding support for Shallow Clone and GitHub Enterprise in AWS CodeBuild. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/guardduty`: Adds new service + * Added the missing AccessKeyDetails object to the resource shape. 
+* `service/lambda`: Updates service API and documentation + * AWS Lambda now supports Revision ID on your function versions and aliases, to track and apply conditional updates when you are updating your function version or alias resources. + +### SDK Bugs +* `service/s3/s3manager`: Fix check for nil OrigErr in Error() [#1749](https://github.com/aws/aws-sdk-go/issues/1749) + * S3 Manager's `Error` type did not check for nil of `OrigErr` when calling `Error()` + * Fixes [#1748](https://github.com/aws/aws-sdk-go/issues/1748) +Release v1.12.67 (2018-01-22) +=== + +### Service Client Updates +* `service/budgets`: Updates service API and documentation + * Add additional costTypes: IncludeDiscount, UseAmortized, to support finer control for different charges included in a cost budget. + +Release v1.12.66 (2018-01-19) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/glue`: Updates service API and documentation + * New AWS Glue DataCatalog APIs to manage table versions and a new feature to skip archiving of the old table version when updating table. +* `service/transcribe`: Adds new service + +Release v1.12.65 (2018-01-18) +=== + +### Service Client Updates +* `service/sagemaker`: Updates service API and documentation + * CreateTrainingJob and CreateEndpointConfig now supports KMS Key for volume encryption. + +Release v1.12.64 (2018-01-17) +=== + +### Service Client Updates +* `service/autoscaling-plans`: Updates service documentation +* `service/ec2`: Updates service documentation + * Documentation updates for EC2 + +Release v1.12.63 (2018-01-17) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API and documentation +* `service/autoscaling-plans`: Adds new service +* `service/rds`: Updates service API and documentation + * With this release you can now integrate RDS DB instances with CloudWatch Logs. 
We have added parameters to the operations for creating and modifying DB instances (for example CreateDBInstance) to allow you to take advantage of this capability through the CLI and API. Once you enable this feature, a stream of log events will publish to CloudWatch Logs for each log type you enable. + +Release v1.12.62 (2018-01-15) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/lambda`: Updates service API and documentation + * Support for creating Lambda Functions using 'dotnetcore2.0' and 'go1.x'. + +Release v1.12.61 (2018-01-12) +=== + +### Service Client Updates +* `service/glue`: Updates service API and documentation + * Support is added to generate ETL scripts in Scala which can now be run by AWS Glue ETL jobs. In addition, the trigger API now supports firing when any conditions are met (in addition to all conditions). Also, jobs can be triggered based on a "failed" or "stopped" job run (in addition to a "succeeded" job run). + +Release v1.12.60 (2018-01-11) +=== + +### Service Client Updates +* `service/elasticloadbalancing`: Updates service API and documentation +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `service/rds`: Updates service API and documentation + * Read Replicas for Amazon RDS for MySQL, MariaDB, and PostgreSQL now support Multi-AZ deployments.Amazon RDS Read Replicas enable you to create one or more read-only copies of your database instance within the same AWS Region or in a different AWS Region. Updates made to the source database are asynchronously copied to the Read Replicas. In addition to providing scalability for read-heavy workloads, you can choose to promote a Read Replica to become standalone a DB instance when needed.Amazon RDS Multi-AZ Deployments provide enhanced availability for database instances within a single AWS Region. With Multi-AZ, your data is synchronously replicated to a standby in a different Availability Zone (AZ). 
In case of an infrastructure failure, Amazon RDS performs an automatic failover to the standby, minimizing disruption to your applications.You can now combine Read Replicas with Multi-AZ as part of a disaster recovery strategy for your production databases. A well-designed and tested plan is critical for maintaining business continuity after a disaster. Since Read Replicas can also be created in different regions than the source database, your Read Replica can be promoted to become the new production database in case of a regional disruption.You can also combine Read Replicas with Multi-AZ for your database engine upgrade process. You can create a Read Replica of your production database instance and upgrade it to a new database engine version. When the upgrade is complete, you can stop applications, promote the Read Replica to a standalone database instance and switch over your applications. Since the database instance is already a Multi-AZ deployment, no additional steps are needed.For more information, see the Amazon RDS User Guide. +* `service/ssm`: Updates service documentation + * Updates documentation for the HierarchyLevelLimitExceededException error. + +Release v1.12.59 (2018-01-09) +=== + +### Service Client Updates +* `service/kms`: Updates service documentation + * Documentation updates for AWS KMS + +Release v1.12.58 (2018-01-09) +=== + +### Service Client Updates +* `service/ds`: Updates service API and documentation + * On October 24 we introduced AWS Directory Service for Microsoft Active Directory (Standard Edition), also known as AWS Microsoft AD (Standard Edition), which is a managed Microsoft Active Directory (AD) that is optimized for small and midsize businesses (SMBs). With this SDK release, you can now create an AWS Microsoft AD directory using API. This enables you to run typical SMB workloads using a cost-effective, highly available, and managed Microsoft AD in the AWS Cloud. 
+ +Release v1.12.57 (2018-01-08) +=== + +### Service Client Updates +* `service/codedeploy`: Updates service API and documentation + * The AWS CodeDeploy API was updated to support DeleteGitHubAccountToken, a new method that deletes a GitHub account connection. +* `service/discovery`: Updates service API and documentation + * Documentation updates for AWS Application Discovery Service. +* `service/route53`: Updates service API and documentation + * This release adds an exception to the CreateTrafficPolicyVersion API operation. + +Release v1.12.56 (2018-01-05) +=== + +### Service Client Updates +* `service/inspector`: Updates service API, documentation, and examples + * Added 2 new attributes to the DescribeAssessmentTemplate response, indicating the total number of assessment runs and last assessment run ARN (if present.) +* `service/snowball`: Updates service documentation + * Documentation updates for snowball +* `service/ssm`: Updates service documentation + * Documentation updates for ssm + +Release v1.12.55 (2018-01-02) +=== + +### Service Client Updates +* `service/rds`: Updates service documentation + * Documentation updates for rds + +Release v1.12.54 (2017-12-29) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/workspaces`: Updates service API and documentation + * Modify WorkSpaces have been updated with flexible storage and switching of hardware bundles feature. The following configurations have been added to ModifyWorkSpacesProperties: storage and compute. This update provides the capability to configure the storage of a WorkSpace. It also adds the capability of switching hardware bundle of a WorkSpace by specifying an eligible compute (Value, Standard, Performance, Power). + +Release v1.12.53 (2017-12-22) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * This release fixes an issue with tags not showing in DescribeAddresses responses. 
+* `service/ecs`: Updates service API and documentation + * Amazon ECS users can now set a health check initialization wait period of their ECS services; the services that are associated with an Elastic Load Balancer (ELB) will wait for a period of time before the ELB becomes healthy. You can now configure this in Create and Update Service. +* `service/inspector`: Updates service API and documentation + * PreviewAgents API now returns additional fields within the AgentPreview data type. The API now shows the agent health and availability status for all instances included in the assessment target. This allows users to check the health status of Inspector Agents before running an assessment. In addition, it shows the instance ID, hostname, and IP address of the targeted instances. +* `service/sagemaker`: Updates service API and documentation + * SageMaker Models no longer support SupplementalContainers. APIs that have been affected are CreateModel and DescribeModel. + +Release v1.12.52 (2017-12-21) +=== + +### Service Client Updates +* `service/codebuild`: Updates service API and documentation + * Adding support allowing AWS CodeBuild customers to select specific curated image versions. +* `service/ec2`: Updates service API and documentation + * Elastic IP tagging enables you to add key and value metadata to your Elastic IPs so that you can search, filter, and organize them according to your organization's needs. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/kinesisanalytics`: Updates service API and documentation + * Kinesis Analytics now supports AWS Lambda functions as output. + +Release v1.12.51 (2017-12-21) +=== + +### Service Client Updates +* `service/config`: Updates service API +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/iot`: Updates service API and documentation + * This release adds support for code signed Over-the-air update functionality for Amazon FreeRTOS. 
Users can now create and schedule Over-the-air updates to their Amazon FreeRTOS devices using these new APIs. + +Release v1.12.50 (2017-12-19) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * API Gateway now adds support for calling API with compressed payloads using one of the supported content codings, tagging an API stage for cost allocation, and returning API keys from a custom authorizer for use with a usage plan. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/mediastore-data`: Updates service documentation +* `service/route53`: Updates service API and documentation + * Route 53 added support for a new China (Ningxia) region, cn-northwest-1. You can now specify cn-northwest-1 as the region for latency-based or geoproximity routing. Route 53 also added support for a new EU (Paris) region, eu-west-3. You can now associate VPCs in eu-west-3 with private hosted zones and create alias records that route traffic to resources in eu-west-3. + +Release v1.12.49 (2017-12-19) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/monitoring`: Updates service documentation + * Documentation updates for monitoring + +Release v1.12.48 (2017-12-15) +=== + +### Service Client Updates +* `service/appstream`: Updates service API and documentation + * This API update is to enable customers to add tags to their Amazon AppStream 2.0 resources + +Release v1.12.47 (2017-12-14) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * Adds support for Cognito Authorizer scopes at the API method level. +* `service/email`: Updates service documentation + * Added information about the maximum number of transactions per second for the SendCustomVerificationEmail operation. +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+ +Release v1.12.46 (2017-12-12) +=== + +### Service Client Updates +* `service/workmail`: Adds new service + * Today, Amazon WorkMail released an administrative SDK and enabled AWS CloudTrail integration. With the administrative SDK, you can natively integrate WorkMail with your existing services. The SDK enables programmatic user, resource, and group management through API calls. This means your existing IT tools and workflows can now automate WorkMail management, and third party applications can streamline WorkMail migrations and account actions. + +Release v1.12.45 (2017-12-11) +=== + +### Service Client Updates +* `service/cognito-idp`: Updates service API and documentation +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/lex-models`: Updates service API and documentation +* `service/sagemaker`: Updates service API + * CreateModel API Update: The request parameter 'ExecutionRoleArn' has changed from optional to required. + +Release v1.12.44 (2017-12-08) +=== + +### Service Client Updates +* `service/appstream`: Updates service API and documentation + * This API update is to support the feature that allows customers to automatically consume the latest Amazon AppStream 2.0 agent as and when published by AWS. +* `service/ecs`: Updates service documentation + * Documentation updates for Windows containers. +* `service/monitoring`: Updates service API and documentation + * With this launch, you can now create a CloudWatch alarm that alerts you when M out of N datapoints of a metric are breaching your predefined threshold, such as three out of five times in any given five minutes interval or two out of six times in a thirty minutes interval. When M out of N datapoints are not breaching your threshold in an interval, the alarm will be in OK state. Please note that the M datapoints out of N datapoints in an interval can be of any order and does not need to be consecutive. 
Consequently, you can now get alerted even when the spikes in your metrics are intermittent over an interval. + +Release v1.12.43 (2017-12-07) +=== + +### Service Client Updates +* `service/email`: Updates service API, documentation, and paginators + * Customers can customize the emails that Amazon SES sends when verifying new identities. This feature is helpful for developers whose applications send email through Amazon SES on behalf of their customers. +* `service/es`: Updates service API and documentation + * Added support for encryption of data at rest on Amazon Elasticsearch Service using AWS KMS + +### SDK Bugs +* `models/apis` Fixes removes colliding sagemaker models folders ([#1686](https://github.com/aws/aws-sdk-go/pull/1686)) + * Fixes Release v1.12.42's SageMaker vs sagemaker model folders. +Release v1.12.42 (2017-12-06) +=== + +### Service Client Updates +* `service/clouddirectory`: Updates service API and documentation + * Amazon Cloud Directory makes it easier for you to apply schema changes across your directories with in-place schema upgrades. Your directories now remain available while backward-compatible schema changes are being applied, such as the addition of new fields. You also can view the history of your schema changes in Cloud Directory by using both major and minor version identifiers, which can help you track and audit schema versions across directories. +* `service/elasticbeanstalk`: Updates service documentation + * Documentation updates for AWS Elastic Beanstalk. +* `service/sagemaker`: Adds new service + * Initial waiters for common SageMaker workflows. + +Release v1.12.41 (2017-12-05) +=== + +### Service Client Updates +* `service/iot`: Updates service API and documentation + * Add error action API for RulesEngine. 
+* `service/servicecatalog`: Updates service API and documentation + * ServiceCatalog has two distinct personas for its use, an "admin" persona (who creates sets of products with different versions and prescribes who has access to them) and an "end-user" persona (who can launch cloud resources based on the configuration data their admins have given them access to). This API update will allow admin users to deactivate/activate product versions, end-user will only be able to access and launch active product versions. +* `service/servicediscovery`: Adds new service + * Amazon Route 53 Auto Naming lets you configure public or private namespaces that your microservice applications run in. When instances of the service become available, you can call the Auto Naming API to register the instance, and Amazon Route 53 automatically creates up to five DNS records and an optional health check. Clients that submit DNS queries for the service receive an answer that contains up to eight healthy records. + +Release v1.12.40 (2017-12-04) +=== + +### Service Client Updates +* `service/budgets`: Updates service API and documentation + * Add additional costTypes to support finer control for different charges included in a cost budget. +* `service/ecs`: Updates service documentation + * Documentation updates for ecs + +Release v1.12.39 (2017-12-01) +=== + +### Service Client Updates +* `service/SageMaker`: Updates service waiters + +Release v1.12.38 (2017-11-30) +=== + +### Service Client Updates +* `service/AWSMoneypenny`: Adds new service +* `service/Cloud9`: Adds new service +* `service/Serverless Registry`: Adds new service +* `service/apigateway`: Updates service API, documentation, and paginators + * Added support Private Integration and VPC Link features in API Gateway. This allows to create an API with the API Gateway private integration, thus providing clients access to HTTP/HTTPS resources in an Amazon VPC from outside of the VPC through a VpcLink resource. 
+* `service/ec2`: Updates service API and documentation + * Adds the following updates: 1. Spread Placement ensures that instances are placed on distinct hardware in order to reduce correlated failures. 2. Inter-region VPC Peering allows customers to peer VPCs across different AWS regions without requiring additional gateways, VPN connections or physical hardware +* `service/lambda`: Updates service API and documentation + * AWS Lambda now supports the ability to set the concurrency limits for individual functions, and increasing memory to 3008 MB. + +Release v1.12.37 (2017-11-30) +=== + +### Service Client Updates +* `service/Ardi`: Adds new service +* `service/autoscaling`: Updates service API and documentation + * You can now use Auto Scaling with EC2 Launch Templates via the CreateAutoScalingGroup and UpdateAutoScalingGroup APIs. +* `service/ec2`: Updates service API and documentation + * Adds the following updates: 1. T2 Unlimited enables high CPU performance for any period of time whenever required 2. You are now able to create and launch EC2 m5 and h1 instances +* `service/lightsail`: Updates service API and documentation + * This release adds support for load balancer and TLS/SSL certificate management. This set of APIs allows customers to create, manage, and scale secure load balanced applications on Lightsail infrastructure. To provide support for customers who manage their DNS on Lightsail, we've added the ability create an Alias A type record which can point to a load balancer DNS name via the CreateDomainEntry API http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_CreateDomainEntry.html. +* `service/ssm`: Updates service API and documentation + * This release updates AWS Systems Manager APIs to enable executing automations at controlled rate, target resources in a resource groups and execute entire automation at once or single step at a time. 
It is now also possible to use YAML, in addition to JSON, when creating Systems Manager documents. +* `service/waf`: Updates service API and documentation + * This release adds support for rule group and managed rule group. Rule group is a container of rules that customers can create, put rules in it and associate the rule group to a WebACL. All rules in a rule group will function identically as they would if each rule was individually associated to the WebACL. Managed rule group is a pre-configured rule group composed by our security partners and made available via the AWS Marketplace. Customers can subscribe to these managed rule groups, associate the managed rule group to their WebACL and start using them immediately to protect their resources. +* `service/waf-regional`: Updates service API and documentation + +Release v1.12.36 (2017-11-29) +=== + +### Service Client Updates +* `service/DeepInsight`: Adds new service +* `service/IronmanRuntime`: Adds new service +* `service/Orchestra - Laser`: Adds new service +* `service/SageMaker`: Adds new service +* `service/Shine`: Adds new service +* `service/archived.kinesisvideo`: Adds new service +* `service/data.kinesisvideo`: Adds new service +* `service/dynamodb`: Updates service API and documentation + * Amazon DynamoDB now supports the following features: Global Table and On-Demand Backup. Global Table is a fully-managed, multi-region, multi-master database. DynamoDB customers can now write anywhere and read anywhere with single-digit millisecond latency by performing database operations closest to where end users reside. Global Table also enables customers to disaster-proof their applications, keeping them running and data accessible even in the face of natural disasters or region disruptions. Customers can set up Global Table with just a few clicks in the AWS Management Console-no application rewrites required. 
On-Demand Backup capability is to protect data from loss due to application errors, and meet customers' archival needs for compliance and regulatory reasons. Customers can backup and restore their DynamoDB table data anytime, with a single-click in the AWS management console or a single API call. Backup and restore actions execute with zero impact on table performance or availability. For more information, see the Amazon DynamoDB Developer Guide. +* `service/ecs`: Updates service API and documentation + * Amazon Elastic Container Service (Amazon ECS) released a new launch type for running containers on a serverless infrastructure. The Fargate launch type allows you to run your containerized applications without the need to provision and manage the backend infrastructure. Just register your task definition and Fargate launches the container for you. +* `service/glacier`: Updates service API and documentation + * This release includes support for Glacier Select, a new feature that allows you to filter and analyze your Glacier archives and store the results in a user-specified S3 location. +* `service/greengrass`: Updates service API and documentation + * Greengrass OTA feature allows updating Greengrass Core and Greengrass OTA Agent. Local Resource Access feature allows Greengrass Lambdas to access local resources such as peripheral devices and volumes. +* `service/iot`: Updates service API and documentation + * This release adds support for a number of new IoT features, including AWS IoT Device Management (Jobs, Fleet Index and Thing Registration), Thing Groups, Policies on Thing Groups, Registry & Job Events, JSON Logs, Fine-Grained Logging Controls, Custom Authorization and AWS Service Authentication Using X.509 Certificates. +* `service/kinesisvideo`: Adds new service + * Announcing Amazon Kinesis Video Streams, a fully managed video ingestion and storage service. 
Kinesis Video Streams makes it easy to securely stream video from connected devices to AWS for machine learning, analytics, and processing. You can also stream other time-encoded data like RADAR and LIDAR signals using Kinesis Video Streams. +* `service/rekognition`: Updates service API, documentation, and paginators + * This release introduces Amazon Rekognition support for video analysis. +* `service/s3`: Updates service API and documentation + * This release includes support for Glacier Select, a new feature that allows you to filter and analyze your Glacier storage class objects and store the results in a user-specified S3 location. + +Release v1.12.35 (2017-11-29) +=== + +### Service Client Updates +* `service/AmazonMQ`: Adds new service +* `service/GuardDuty`: Adds new service +* `service/apigateway`: Updates service API and documentation + * Changes related to CanaryReleaseDeployment feature. Enables API developer to create a deployment as canary deployment and test API changes with percentage of customers before promoting changes to all customers. +* `service/batch`: Updates service API and documentation + * Add support for Array Jobs which allow users to easily submit many copies of a job with a single API call. This change also enhances the job dependency model to support N_TO_N and sequential dependency chains. The ListJobs and DescribeJobs APIs now have the ability to list or describe the status of entire Array Jobs or individual elements within the array. +* `service/cognito-idp`: Updates service API and documentation +* `service/deepdish`: Adds new service + * AWS AppSync is an enterprise-level, fully managed GraphQL service with real-time data synchronization and offline programming features. +* `service/ec2`: Updates service API and documentation + * Adds the following updates: 1. You are now able to host a service powered by AWS PrivateLink to provide private connectivity to other VPCs. 
You are now also able to create endpoints to other services powered by PrivateLink including AWS services, Marketplace Seller services or custom services created by yourself or other AWS VPC customers. 2. You are now able to save launch parameters in a single template that can be used with Auto Scaling, Spot Fleet, Spot, and On Demand instances. 3. You are now able to launch Spot instances via the RunInstances API, using a single additional parameter. RunInstances will response synchronously with an instance ID should capacity be available for your Spot request. 4. A simplified Spot pricing model which delivers low, predictable prices that adjust gradually, based on long-term trends in supply and demand. 5. Amazon EC2 Spot can now hibernate Amazon EBS-backed instances in the event of an interruption, so your workloads pick up from where they left off. Spot can fulfill your request by resuming instances from a hibernated state when capacity is available. +* `service/lambda`: Updates service API and documentation + * Lambda aliases can now shift traffic between two function versions, based on preassigned weights. + +Release v1.12.34 (2017-11-27) +=== + +### Service Client Updates +* `service/data.mediastore`: Adds new service +* `service/mediaconvert`: Adds new service + * AWS Elemental MediaConvert is a file-based video conversion service that transforms media into formats required for traditional broadcast and for internet streaming to multi-screen devices. +* `service/medialive`: Adds new service + * AWS Elemental MediaLive is a video service that lets you easily create live outputs for broadcast and streaming delivery. +* `service/mediapackage`: Adds new service + * AWS Elemental MediaPackage is a just-in-time video packaging and origination service that lets you format highly secure and reliable live outputs for a variety of devices. +* `service/mediastore`: Adds new service + * AWS Elemental MediaStore is an AWS storage service optimized for media. 
It gives you the performance, consistency, and low latency required to deliver live and on-demand video content. AWS Elemental MediaStore acts as the origin store in your video workflow. + +Release v1.12.33 (2017-11-22) +=== + +### Service Client Updates +* `service/acm`: Updates service API and documentation + * AWS Certificate Manager now supports the ability to import domainless certs and additional Key Types as well as an additional validation method for DNS. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.32 (2017-11-22) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * Add support for Access logs and customizable integration timeouts +* `service/cloudformation`: Updates service API and documentation + * 1) Instance-level parameter overrides (CloudFormation-StackSet feature): This feature will allow the customers to override the template parameters on specific stackInstances. Customers will also have ability to update their existing instances with/without parameter-overrides using a new API "UpdateStackInstances" 2) Add support for SSM parameters in CloudFormation - This feature will allow the customers to use Systems Manager parameters in CloudFormation templates. They will be able to see values for these parameters in Describe APIs. +* `service/codebuild`: Updates service API and documentation + * Adding support for accessing Amazon VPC resources from AWS CodeBuild, dependency caching and build badges. +* `service/elasticmapreduce`: Updates service API and documentation + * Enable Kerberos on Amazon EMR. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rekognition`: Updates service API and documentation + * This release includes updates to Amazon Rekognition for the following APIs. The new DetectText API allows you to recognize and extract textual content from images. Face Model Versioning has been added to operations that deal with face detection. 
+* `service/shield`: Updates service API, documentation, and paginators + * The AWS Shield SDK has been updated in order to support Elastic IP address protections, the addition of AttackProperties objects in DescribeAttack responses, and a new GetSubscriptionState operation. +* `service/storagegateway`: Updates service API and documentation + * AWS Storage Gateway now enables you to get notification when all your files written to your NFS file share have been uploaded to Amazon S3. Storage Gateway also enables guessing of the MIME type for uploaded objects based on file extensions. +* `service/xray`: Updates service API, documentation, and paginators + * Added automatic pagination support for AWS X-Ray APIs in the SDKs that support this feature. + +Release v1.12.31 (2017-11-20) +=== + +### Service Client Updates +* `service/apigateway`: Updates service documentation + * Documentation updates for Apigateway +* `service/codecommit`: Updates service API, documentation, and paginators + * AWS CodeCommit now supports pull requests. You can use pull requests to collaboratively review code changes for minor changes or fixes, major feature additions, or new versions of your released software. +* `service/firehose`: Updates service API and documentation + * This release includes a new Kinesis Firehose feature that supports Splunk as Kinesis Firehose delivery destination. You can now use Kinesis Firehose to ingest real-time data to Splunk in a serverless, reliable, and scalable manner. This release also includes a new feature that allows you to configure Lambda buffer size in Kinesis Firehose data transformation feature. You can now customize the data buffer size before invoking Lambda function in Kinesis Firehose for data transformation. This feature allows you to flexibly trade-off processing and delivery latency with cost and efficiency based on your specific use cases and requirements. 
+* `service/iis`: Adds new service + * The AWS Cost Explorer API gives customers programmatic access to AWS cost and usage information, allowing them to perform ad hoc queries and build interactive cost management applications that leverage this dataset. +* `service/kinesis`: Updates service API and documentation + * Customers can now obtain the important characteristics of their stream with DescribeStreamSummary. The response will not include the shard list for the stream but will have the number of open shards, and all the other fields included in the DescribeStream response. +* `service/workdocs`: Updates service API and documentation + * DescribeGroups API and miscellaneous enhancements + +### SDK Bugs +* `aws/client`: Retry delays for throttled exceptions were not limited to 5 minutes [#1654](https://github.com/aws/aws-sdk-go/pull/1654) + * Fixes [#1653](https://github.com/aws/aws-sdk-go/issues/1653) +Release v1.12.30 (2017-11-17) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API and documentation +* `service/dms`: Updates service API, documentation, and paginators + * Support for migration task assessment. Support for data validation after the migration. +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rds`: Updates service API and documentation + * Amazon RDS now supports importing MySQL databases by using backup files from Amazon S3. +* `service/s3`: Updates service API + * Added ORC to the supported S3 Inventory formats. + +### SDK Bugs +* `private/protocol/restjson`: Define JSONValue marshaling for body and querystring ([#1640](https://github.com/aws/aws-sdk-go/pull/1640)) + * Adds support for APIs which use JSONValue for body and querystring targets. 
+ * Fixes [#1636](https://github.com/aws/aws-sdk-go/issues/1636) +Release v1.12.29 (2017-11-16) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API and documentation +* `service/ec2`: Updates service API + * You are now able to create and launch EC2 x1e smaller instance sizes +* `service/glue`: Updates service API and documentation + * API update for AWS Glue. New crawler configuration attribute enables customers to specify crawler behavior. New XML classifier enables classification of XML data. +* `service/opsworkscm`: Updates service API, documentation, and waiters + * Documentation updates for OpsWorks-cm: a new feature, OpsWorks for Puppet Enterprise, that allows users to create and manage OpsWorks-hosted Puppet Enterprise servers. +* `service/organizations`: Updates service API, documentation, and paginators + * This release adds APIs that you can use to enable and disable integration with AWS services designed to work with AWS Organizations. This integration allows the AWS service to perform operations on your behalf on all of the accounts in your organization. Although you can use these APIs yourself, we recommend that you instead use the commands provided in the other AWS service to enable integration with AWS Organizations. +* `service/route53`: Updates service API and documentation + * You can use Route 53's GetAccountLimit/GetHostedZoneLimit/GetReusableDelegationSetLimit APIs to view your current limits (including custom set limits) on Route 53 resources such as hosted zones and health checks. These APIs also return the number of each resource you're currently using to enable comparison against your current limits. + +Release v1.12.28 (2017-11-15) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * 1. Extended GetDocumentationParts operation to support retrieving documentation parts resources without contents. 2. Added hosted zone ID in the custom domain response. 
+* `service/email`: Updates service API, documentation, and examples + * SES launches Configuration Set Reputation Metrics and Email Pausing Today, two features that build upon the capabilities of the reputation dashboard. The first is the ability to export reputation metrics for individual configuration sets. The second is the ability to temporarily pause email sending, either at the configuration set level, or across your entire Amazon SES account. +* `service/polly`: Updates service API + * Amazon Polly adds Korean language support with new female voice - "Seoyeon" and new Indian English female voice - "Aditi" +* `service/states`: Updates service API and documentation + * You can now use the UpdateStateMachine API to update your state machine definition and role ARN. Existing executions will continue to use the previous definition and role ARN. You can use the DescribeStateMachineForExecution API to determine which state machine definition and role ARN is associated with an execution + +Release v1.12.27 (2017-11-14) +=== + +### Service Client Updates +* `service/ecs`: Updates service API and documentation + * Added new mode for Task Networking in ECS, called awsvpc mode. Mode configuration parameters to be passed in via awsvpcConfiguration. Updated APIs now use/show this new mode - RegisterTaskDefinition, CreateService, UpdateService, RunTask, StartTask. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/lightsail`: Updates service API and documentation + * Lightsail now supports attached block storage, which allows you to scale your applications and protect application data with additional SSD-backed storage disks. This feature allows Lightsail customers to attach secure storage disks to their Lightsail instances and manage their attached disks, including creating and deleting disks, attaching and detaching disks from instances, and backing up disks via snapshot. 
+* `service/route53`: Updates service API and documentation + * When a Route 53 health check or hosted zone is created by a linked AWS service, the object now includes information about the service that created it. Hosted zones or health checks that are created by a linked service can't be updated or deleted using Route 53. +* `service/ssm`: Updates service API and documentation + * EC2 Systems Manager GetInventory API adds support for aggregation. + +### SDK Enhancements +* `aws/request`: Remove default port from HTTP host header ([#1618](https://github.com/aws/aws-sdk-go/pull/1618)) + * Updates the SDK to automatically remove default ports based on the URL's scheme when setting the HTTP Host header's value. + * Fixes [#1537](https://github.com/aws/aws-sdk-go/issues/1537) + +Release v1.12.26 (2017-11-09) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Introduces the following features: 1. Create a default subnet in an Availability Zone if no default subnet exists. 2. Spot Fleet integrates with Elastic Load Balancing to enable you to attach one or more load balancers to a Spot Fleet request. When you attach the load balancer, it automatically registers the instance in the Spot Fleet to the load balancers which distributes incoming traffic across the instances. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.25 (2017-11-08) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API and documentation +* `service/batch`: Updates service documentation + * Documentation updates for AWS Batch. +* `service/ec2`: Updates service API and documentation + * AWS PrivateLink for Amazon Services - Customers can now privately access Amazon services from their Amazon Virtual Private Cloud (VPC), without using public IPs, and without requiring the traffic to traverse across the Internet. 
+* `service/elasticache`: Updates service API and documentation + * This release adds online resharding for ElastiCache for Redis offering, providing the ability to add and remove shards from a running cluster. Developers can now dynamically scale-out or scale-in their Redis cluster workloads to adapt to changes in demand. ElastiCache will resize the cluster by adding or removing shards and redistribute hash slots uniformly across the new shard configuration, all while the cluster continues to stay online and serves requests. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.24 (2017-11-07) +=== + +### Service Client Updates +* `service/elasticloadbalancingv2`: Updates service documentation +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rds`: Updates service API and documentation + * DescribeOrderableDBInstanceOptions now returns the minimum and maximum allowed values for storage size, total provisioned IOPS, and provisioned IOPS per GiB for a DB instance. +* `service/s3`: Updates service API, documentation, and examples + * This releases adds support for 4 features: 1. Default encryption for S3 Bucket, 2. Encryption status in inventory and Encryption support for inventory. 3. Cross region replication of KMS-encrypted objects, and 4. ownership overwrite for CRR. + +Release v1.12.23 (2017-11-07) +=== + +### Service Client Updates +* `service/api.pricing`: Adds new service +* `service/ec2`: Updates service API + * You are now able to create and launch EC2 C5 instances, the next generation of EC2's compute-optimized instances, in us-east-1, us-west-2 and eu-west-1. C5 instances offer up to 72 vCPUs, 144 GiB of DDR4 instance memory, 25 Gbps in Network bandwidth and improved EBS and Networking bandwidth on smaller instance sizes to deliver improved performance for compute-intensive workloads. +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+* `service/kms`: Updates service API, documentation, and examples + * Documentation updates for AWS KMS. +* `service/organizations`: Updates service documentation + * This release updates permission statements for several API operations, and corrects some other minor errors. +* `service/states`: Updates service API, documentation, and paginators + * Documentation update. + +Release v1.12.22 (2017-11-03) +=== + +### Service Client Updates +* `service/ecs`: Updates service API and documentation + * Amazon ECS users can now add devices to their containers and enable init process in containers through the use of docker's 'devices' and 'init' features. These fields can be specified under linuxParameters in ContainerDefinition in the Task Definition Template. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.21 (2017-11-02) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * This release supports creating and managing Regional and Edge-Optimized API endpoints. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +### SDK Bugs +* `aws/request`: Fix bug in request presign creating invalid URL ([#1624](https://github.com/aws/aws-sdk-go/pull/1624)) + * Fixes a bug in the Request Presign and PresignRequest methods that would allow an invalid expire duration as input. An expire time of 0 would be interpreted by the SDK to generate a normal request signature, not a presigned URL. This caused the returned URL to be unusable. + * Fixes [#1617](https://github.com/aws/aws-sdk-go/issues/1617) +Release v1.12.20 (2017-11-01) +=== + +### Service Client Updates +* `service/acm`: Updates service documentation + * Documentation updates for ACM +* `service/cloudhsmv2`: Updates service documentation + * Minor documentation update for AWS CloudHSM (cloudhsmv2). 
+* `service/directconnect`: Updates service API and documentation + * AWS DirectConnect now provides support for Global Access for Virtual Private Cloud (VPC) via a new feature called Direct Connect Gateway. A Direct Connect Gateway will allow you to group multiple Direct Connect Private Virtual Interfaces (DX-VIF) and Private Virtual Gateways (VGW) from different AWS regions (but belonging to the same AWS Account) and pass traffic from any DX-VIF to any VPC in the grouping. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +### SDK Enhancements +* `aws/client`: Adding status code 429 to throttlable status codes in default retryer (#1621) + +Release v1.12.19 (2017-10-26) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.18 (2017-10-26) +=== + +### Service Client Updates +* `service/cloudfront`: Updates service API and documentation + * You can now specify additional options for MinimumProtocolVersion, which controls the SSL/TLS protocol that CloudFront uses to communicate with viewers. The minimum protocol version that you choose also determines the ciphers that CloudFront uses to encrypt the content that it returns to viewers. +* `service/ec2`: Updates service API + * You are now able to create and launch EC2 P3 instance, next generation GPU instances, optimized for machine learning and high performance computing applications. With up to eight NVIDIA Tesla V100 GPUs, P3 instances provide up to one petaflop of mixed-precision, 125 teraflops of single-precision, and 62 teraflops of double-precision floating point performance, as well as a 300 GB/s second-generation NVLink interconnect that enables high-speed, low-latency GPU-to-GPU communication. P3 instances also feature up to 64 vCPUs based on custom Intel Xeon E5 (Broadwell) processors, 488 GB of DRAM, and 25 Gbps of dedicated aggregate network bandwidth using the Elastic Network Adapter (ENA). 
+* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.17 (2017-10-24) +=== + +### Service Client Updates +* `service/config`: Updates service API +* `service/elasticache`: Updates service API, documentation, and examples + * Amazon ElastiCache for Redis today announced support for data encryption both for data in-transit and data at-rest. The new encryption in-transit functionality enables ElastiCache for Redis customers to encrypt data for all communication between clients and Redis engine, and all intra-cluster Redis communication. The encryption at-rest functionality allows customers to encrypt their S3 based backups. Customers can begin using the new functionality by simply enabling this functionality via AWS console, and a small configuration change in their Redis clients. The ElastiCache for Redis service automatically manages life cycle of the certificates required for encryption, including the issuance, renewal and expiration of certificates. Additionally, as part of this launch, customers will gain the ability to start using the Redis AUTH command that provides an added level of authentication. +* `service/glue`: Adds new service + * AWS Glue: Adding a new API, BatchStopJobRun, to stop one or more job runs for a specified Job. +* `service/pinpoint`: Updates service API and documentation + * Added support for APNs VoIP messages. Added support for collapsible IDs, message priority, and TTL for APNs and FCM/GCM. + +Release v1.12.16 (2017-10-23) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/organizations`: Updates service API and documentation + * This release supports integrating other AWS services with AWS Organizations through the use of an IAM service-linked role called AWSServiceRoleForOrganizations. Certain operations automatically create that role if it does not already exist. 
+ +Release v1.12.15 (2017-10-20) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Adding pagination support for DescribeSecurityGroups for EC2 Classic and VPC Security Groups + +Release v1.12.14 (2017-10-19) +=== + +### Service Client Updates +* `service/sqs`: Updates service API and documentation + * Added support for tracking cost allocation by adding, updating, removing, and listing the metadata tags of Amazon SQS queues. +* `service/ssm`: Updates service API and documentation + * EC2 Systems Manager versioning support for Parameter Store. Also support for referencing parameter versions in SSM Documents. + +Release v1.12.13 (2017-10-18) +=== + +### Service Client Updates +* `service/lightsail`: Updates service API and documentation + * This release adds support for Windows Server-based Lightsail instances. The GetInstanceAccessDetails API now returns the password of your Windows Server-based instance when using the default key pair. GetInstanceAccessDetails also returns a PasswordData object for Windows Server instances containing the ciphertext and keyPairName. The Blueprint data type now includes a list of platform values (LINUX_UNIX or WINDOWS). The Bundle data type now includes a list of SupportedPlatforms values (LINUX_UNIX or WINDOWS). + +Release v1.12.12 (2017-10-17) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/es`: Updates service API and documentation + * This release adds support for VPC access to Amazon Elasticsearch Service. + +Release v1.12.11 (2017-10-16) +=== + +### Service Client Updates +* `service/cloudhsm`: Updates service API and documentation + * Documentation updates for AWS CloudHSM Classic. +* `service/ec2`: Updates service API and documentation + * You can now change the tenancy of your VPC from dedicated to default with a single API operation. 
For more details refer to the documentation for changing VPC tenancy. +* `service/es`: Updates service API and documentation + * AWS Elasticsearch adds support for enabling slow log publishing. Using slow log publishing options customers can configure and enable index/query slow log publishing of their domain to preferred AWS Cloudwatch log group. +* `service/rds`: Updates service API and waiters + * Adds waiters for DBSnapshotAvailable and DBSnapshotDeleted. +* `service/waf`: Updates service API and documentation + * This release adds support for regular expressions as match conditions in rules, and support for geographical location by country of request IP address as a match condition in rules. +* `service/waf-regional`: Updates service API and documentation + +Release v1.12.10 (2017-10-12) +=== + +### Service Client Updates +* `service/codecommit`: Updates service API and documentation + * This release includes the DeleteBranch API and a change to the contents of a Commit object. +* `service/dms`: Updates service API and documentation + * This change includes addition of new optional parameter to an existing API +* `service/elasticbeanstalk`: Updates service API and documentation + * Added the ability to add, delete or update Tags +* `service/polly`: Updates service API + * Amazon Polly exposes two new voices: "Matthew" (US English) and "Takumi" (Japanese) +* `service/rds`: Updates service API and documentation + * You can now call DescribeValidDBInstanceModifications to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance. + +Release v1.12.9 (2017-10-11) +=== + +### Service Client Updates +* `service/ecr`: Updates service API, documentation, and paginators + * Adds support for new API set used to manage Amazon ECR repository lifecycle policies. Amazon ECR lifecycle policies enable you to specify the lifecycle management of images in a repository. 
The configuration is a set of one or more rules, where each rule defines an action for Amazon ECR to apply to an image. This allows the automation of cleaning up unused images, for example expiring images based on age or status. A lifecycle policy preview API is provided as well, which allows you to see the impact of a lifecycle policy on an image repository before you execute it +* `service/email`: Updates service API and documentation + * Added content related to email template management and templated email sending operations. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.8 (2017-10-10) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * This release includes updates to AWS Virtual Private Gateway. +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `service/opsworkscm`: Updates service API and documentation + * Provide engine specific information for node associations. + +Release v1.12.7 (2017-10-06) +=== + +### Service Client Updates +* `service/sqs`: Updates service documentation + * Documentation updates regarding availability of FIFO queues and miscellaneous corrections. + +Release v1.12.6 (2017-10-05) +=== + +### Service Client Updates +* `service/redshift`: Updates service API and documentation + * DescribeEventSubscriptions API supports tag keys and tag values as request parameters. + +Release v1.12.5 (2017-10-04) +=== + +### Service Client Updates +* `service/kinesisanalytics`: Updates service API and documentation + * Kinesis Analytics now supports schema discovery on objects in S3. Additionally, Kinesis Analytics now supports input data preprocessing through Lambda. +* `service/route53domains`: Updates service API and documentation + * Added a new API that checks whether a domain name can be transferred to Amazon Route 53. 
+ +### SDK Bugs +* `service/s3/s3crypto`: Correct PutObjectRequest documentation ([#1568](https://github.com/aws/aws-sdk-go/pull/1568)) + * s3Crypto's PutObjectRequest docstring example was using an incorrect value. Corrected the type used in the example. +Release v1.12.4 (2017-10-03) +=== + +### Service Client Updates +* `service/ec2`: Updates service API, documentation, and waiters + * This release includes service updates to AWS VPN. +* `service/ssm`: Updates service API and documentation + * EC2 Systems Manager support for tagging SSM Documents. Also support for tag-based permissions to restrict access to SSM Documents based on these tags. + +Release v1.12.3 (2017-10-02) +=== + +### Service Client Updates +* `service/cloudhsm`: Updates service documentation and paginators + * Documentation updates for CloudHSM + +Release v1.12.2 (2017-09-29) +=== + +### Service Client Updates +* `service/appstream`: Updates service API and documentation + * Includes APIs for managing and accessing image builders, and deleting images. +* `service/codebuild`: Updates service API and documentation + * Adding support for Building GitHub Pull Requests in AWS CodeBuild +* `service/mturk-requester`: Updates service API and documentation +* `service/organizations`: Updates service API and documentation + * This release flags the HandshakeParty structure's Type and Id fields as 'required'. They effectively were required in the past, as you received an error if you did not include them. This is now reflected at the API definition level. +* `service/route53`: Updates service API and documentation + * This change allows customers to reset elements of health check. + +### SDK Bugs +* `private/protocol/query`: Fix query protocol handling of nested byte slices ([#1557](https://github.com/aws/aws-sdk-go/issues/1557)) + * Fixes the query protocol to correctly marshal nested []byte values of API operations. 
+* `service/s3`: Fix PutObject and UploadPart API to include ContentMD5 field ([#1559](https://github.com/aws/aws-sdk-go/pull/1559)) + * Fixes the SDK's S3 PutObject and UploadPart API code generation to correctly render the ContentMD5 field into the associated input types for these two API operations. + * Fixes [#1553](https://github.com/aws/aws-sdk-go/pull/1553) +Release v1.12.1 (2017-09-27) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/pinpoint`: Updates service API and documentation + * Added two new push notification channels: Amazon Device Messaging (ADM) and, for push notification support in China, Baidu Cloud Push. Added support for APNs auth via .p8 key file. Added operation for direct message deliveries to user IDs, enabling you to message an individual user on multiple endpoints. + +Release v1.12.0 (2017-09-26) +=== + +### SDK Bugs +* `API Marshaler`: Revert REST JSON and XML protocol marshaler improvements + * Bug [#1550](https://github.com/aws/aws-sdk-go/issues/1550) identified a missed condition in the Amazon Route 53 RESTXML protocol marshaling causing requests to that service to fail. Reverting the marshaler improvements until the bug can be fixed. + +Release v1.11.0 (2017-09-26) +=== + +### Service Client Updates +* `service/cloudformation`: Updates service API and documentation + * You can now prevent a stack from being accidentally deleted by enabling termination protection on the stack. If you attempt to delete a stack with termination protection enabled, the deletion fails and the stack, including its status, remains unchanged. You can enable termination protection on a stack when you create it. Termination protection on stacks is disabled by default. After creation, you can set termination protection on a stack whose status is CREATE_COMPLETE, UPDATE_COMPLETE, or UPDATE_ROLLBACK_COMPLETE. 
+ +### SDK Features +* Add dep Go dependency management metadata files (#1544) + * Adds the Go `dep` dependency management metadata files to the SDK. + * Fixes [#1451](https://github.com/aws/aws-sdk-go/issues/1451) + * Fixes [#634](https://github.com/aws/aws-sdk-go/issues/634) +* `service/dynamodb/expression`: Add expression building utility for DynamoDB ([#1527](https://github.com/aws/aws-sdk-go/pull/1527)) + * Adds a new package, expression, to the SDK providing builder utilities to create DynamoDB expressions safely taking advantage of type safety. +* `API Marshaler`: Add generated marshalers for RESTXML protocol ([#1409](https://github.com/aws/aws-sdk-go/pull/1409)) + * Updates the RESTXML protocol marshaler to use generated code instead of reflection for REST XML based services. +* `API Marshaler`: Add generated marshalers for RESTJSON protocol ([#1547](https://github.com/aws/aws-sdk-go/pull/1547)) + * Updates the RESTJSON protocol marshaler to use generated code instead of reflection for REST JSON based services. + +### SDK Enhancements +* `private/protocol`: Update format of REST JSON and XMl benchmarks ([#1546](https://github.com/aws/aws-sdk-go/pull/1546)) + * Updates the format of the REST JSON and XML benchmarks to be readable. RESTJSON benchmarks were updated to more accurately bench building of the protocol. + +Release v1.10.51 (2017-09-22) +=== + +### Service Client Updates +* `service/config`: Updates service API and documentation +* `service/ecs`: Updates service API and documentation + * Amazon ECS users can now add and drop Linux capabilities to their containers through the use of docker's cap-add and cap-drop features. Customers can specify the capabilities they wish to add or drop for each container in their task definition. +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+* `service/rds`: Updates service documentation + * Documentation updates for rds + +Release v1.10.50 (2017-09-21) +=== + +### Service Client Updates +* `service/budgets`: Updates service API + * Including "DuplicateRecordException" in UpdateNotification and UpdateSubscriber. +* `service/ec2`: Updates service API and documentation + * Add EC2 APIs to copy Amazon FPGA Images (AFIs) within the same region and across multiple regions, delete AFIs, and modify AFI attributes. AFI attributes include name, description and granting/denying other AWS accounts to load the AFI. +* `service/logs`: Updates service API and documentation + * Adds support for associating LogGroups with KMS Keys. + +### SDK Bugs +* Fix greengrass service model being duplicated with different casing. ([#1541](https://github.com/aws/aws-sdk-go/pull/1541)) + * Fixes [#1540](https://github.com/aws/aws-sdk-go/issues/1540) + * Fixes [#1539](https://github.com/aws/aws-sdk-go/issues/1539) +Release v1.10.49 (2017-09-20) +=== + +### Service Client Updates +* `service/Greengrass`: Adds new service +* `service/appstream`: Updates service API and documentation + * API updates for supporting On-Demand fleets. +* `service/codepipeline`: Updates service API and documentation + * This change includes a PipelineMetadata object that is part of the output from the GetPipeline API that includes the Pipeline ARN, created, and updated timestamp. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rds`: Updates service API and documentation + * Introduces the --option-group-name parameter to the ModifyDBSnapshot CLI command. You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. 
For more information, see http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Oracle.html#USER_UpgradeDBInstance.Oracle.OGPG.OG +* `service/runtime.lex`: Updates service API and documentation + +Release v1.10.48 (2017-09-19) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * Fixed bug in EC2 clients preventing ElasticGpuSet from being set. + +### SDK Enhancements +* `aws/credentials`: Add EnvProviderName constant. ([#1531](https://github.com/aws/aws-sdk-go/issues/1531)) + * Adds the "EnvConfigCredentials" string literal as EnvProviderName constant. + * Fixes [#1444](https://github.com/aws/aws-sdk-go/issues/1444) + +Release v1.10.47 (2017-09-18) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Amazon EC2 now lets you opt for Spot instances to be stopped in the event of an interruption instead of being terminated. Your Spot request can be fulfilled again by restarting instances from a previously stopped state, subject to availability of capacity at or below your preferred price. When you submit a persistent Spot request, you can choose from "terminate" or "stop" as the instance interruption behavior. Choosing "stop" will shutdown your Spot instances so you can continue from this stopped state later on. This feature is only available for instances with Amazon EBS volume as their root device. +* `service/email`: Updates service API and documentation + * Amazon Simple Email Service (Amazon SES) now lets you customize the domains used for tracking open and click events. Previously, open and click tracking links referred to destinations hosted on domains operated by Amazon SES. With this feature, you can use your own branded domains for capturing open and click events. 
+* `service/iam`: Updates service API and documentation + * A new API, DeleteServiceLinkedRole, submits a service-linked role deletion request and returns a DeletionTaskId, which you can use to check the status of the deletion. + +Release v1.10.46 (2017-09-15) +=== + +### Service Client Updates +* `service/apigateway`: Updates service API and documentation + * Add a new enum "REQUEST" to '--type ' field in the current create-authorizer API, and make "identitySource" optional. + +Release v1.10.45 (2017-09-14) +=== + +### Service Client Updates +* `service/codebuild`: Updates service API and documentation + * Supporting Parameter Store in environment variables for AWS CodeBuild +* `service/organizations`: Updates service documentation + * Documentation updates for AWS Organizations +* `service/servicecatalog`: Updates service API, documentation, and paginators + * This release of Service Catalog adds API support to copy products. + +Release v1.10.44 (2017-09-13) +=== + +### Service Client Updates +* `service/autoscaling`: Updates service API and documentation + * Customers can create Life Cycle Hooks at the time of creating Auto Scaling Groups through the CreateAutoScalingGroup API +* `service/batch`: Updates service documentation and examples + * Documentation updates for batch +* `service/ec2`: Updates service API + * You are now able to create and launch EC2 x1e.32xlarge instance, a new EC2 instance in the X1 family, in us-east-1, us-west-2, eu-west-1, and ap-northeast-1. x1e.32xlarge offers 128 vCPUs, 3,904 GiB of DDR4 instance memory, high memory bandwidth, large L3 caches, and leading reliability capabilities to boost the performance and reliability of in-memory applications. +* `service/events`: Updates service API and documentation + * Exposes ConcurrentModificationException as one of the valid exceptions for PutPermission and RemovePermission operation. 
+ +### SDK Enhancements +* `service/autoscaling`: Fix documentation for PutScalingPolicy.AutoScalingGroupName [#1522](https://github.com/aws/aws-sdk-go/pull/1522) +* `service/s3/s3manager`: Clarify S3 Upload manager Concurrency config [#1521](https://github.com/aws/aws-sdk-go/pull/1521) + * Fixes [#1458](https://github.com/aws/aws-sdk-go/issues/1458) +* `service/dynamodb/dynamodbattribute`: Add support for time alias. [#1520](https://github.com/aws/aws-sdk-go/pull/1520) + * Related to [#1505](https://github.com/aws/aws-sdk-go/pull/1505) + +Release v1.10.43 (2017-09-12) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * Fixed bug in EC2 clients preventing HostOfferingSet from being set +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.10.42 (2017-09-12) +=== + +### Service Client Updates +* `service/devicefarm`: Updates service API and documentation + * DeviceFarm has added support for two features - RemoteDebugging and Customer Artifacts. Customers can now do remote Debugging on their Private Devices and can now retrieve custom files generated by their tests on the device and the device host (execution environment) on both public and private devices. + +Release v1.10.41 (2017-09-08) +=== + +### Service Client Updates +* `service/logs`: Updates service API and documentation + * Adds support for the PutResourcePolicy, DescribeResourcePolicy and DeleteResourcePolicy APIs. + +Release v1.10.40 (2017-09-07) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service documentation +* `service/ec2`: Updates service API and documentation + * With Tagging support, you can add Key and Value metadata to search, filter and organize your NAT Gateways according to your organization's needs. +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+* `service/lex-models`: Updates service API and documentation +* `service/route53`: Updates service API and documentation + * You can configure Amazon Route 53 to log information about the DNS queries that Amazon Route 53 receives for your domains and subdomains. When you configure query logging, Amazon Route 53 starts to send logs to CloudWatch Logs. You can use various tools, including the AWS console, to access the query logs. + +Release v1.10.39 (2017-09-06) +=== + +### Service Client Updates +* `service/budgets`: Updates service API and documentation + * Add an optional "thresholdType" to notifications to support percentage or absolute value thresholds. + +Release v1.10.38 (2017-09-05) +=== + +### Service Client Updates +* `service/codestar`: Updates service API and documentation + * Added support to tag CodeStar projects. Tags can be used to organize and find CodeStar projects on key-value pairs that you can choose. For example, you could add a tag with a key of "Release" and a value of "Beta" to projects your organization is working on for an upcoming beta release. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.10.37 (2017-09-01) +=== + +### Service Client Updates +* `service/MobileHub`: Adds new service +* `service/gamelift`: Updates service API and documentation + * GameLift VPC resources can be peered with any other AWS VPC. R4 memory-optimized instances now available to deploy. +* `service/ssm`: Updates service API and documentation + * Adding KMS encryption support to SSM Inventory Resource Data Sync. Exposes the ClientToken parameter on SSM StartAutomationExecution to provide idempotent execution requests. + +Release v1.10.36 (2017-08-31) +=== + +### Service Client Updates +* `service/codebuild`: Updates service API, documentation, and examples + * The AWS CodeBuild HTTP API now provides the BatchDeleteBuilds operation, which enables you to delete existing builds. 
+* `service/ec2`: Updates service API and documentation + * Descriptions for Security Group Rules enables customers to be able to define a description for ingress and egress security group rules. The Descriptions for Security Group Rules feature supports one description field per Security Group rule for both ingress and egress rules. Descriptions for Security Group Rules provides a simple way to describe the purpose or function of a Security Group Rule allowing for easier customer identification of configuration elements. Prior to the release of Descriptions for Security Group Rules, customers had to maintain a separate system outside of AWS if they wanted to track Security Group Rule mapping and their purpose for being implemented. If a security group rule has already been created and you would like to update or change your description for that security group rule you can use the UpdateSecurityGroupRuleDescription API. +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/lex-models`: Updates service API and documentation + +### SDK Bugs +* `aws/signer/v4`: Revert [#1491](https://github.com/aws/aws-sdk-go/issues/1491) as change conflicts with an undocumented AWS v4 signature test case. + * Related to: [#1495](https://github.com/aws/aws-sdk-go/issues/1495). +Release v1.10.35 (2017-08-30) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API and documentation +* `service/organizations`: Updates service API and documentation + * The exception ConstraintViolationException now contains a new reason subcode MASTERACCOUNT_MISSING_CONTACT_INFO to make it easier to understand why attempting to remove an account from an Organization can fail. We also improved several of the other text descriptions and examples. 
+ +Release v1.10.34 (2017-08-29) +=== + +### Service Client Updates +* `service/config`: Updates service API and documentation +* `service/ec2`: Updates service API and documentation + * Provides capability to add secondary CIDR blocks to a VPC. + +### SDK Bugs +* `aws/signer/v4`: Fix Signing Unordered Multi Value Query Parameters ([#1491](https://github.com/aws/aws-sdk-go/pull/1491)) + * Removes sorting of query string values when calculating v4 signing as this is not part of the spec. The spec only requires the keys, not values, to be sorted which is achieved by Query.Encode(). +Release v1.10.33 (2017-08-25) +=== + +### Service Client Updates +* `service/cloudformation`: Updates service API and documentation + * Rollback triggers enable you to have AWS CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. +* `service/gamelift`: Updates service API + * Update spelling of MatchmakingTicket status values for internal consistency. +* `service/rds`: Updates service API and documentation + * Option group options now contain additional properties that identify requirements for certain options. Check these properties to determine if your DB instance must be in a VPC or have auto minor upgrade turned on before you can use an option. Check to see if you can downgrade the version of an option after you have installed it. 
+ +### SDK Enhancements +* `example/service/ec2`: Add EC2 list instances example ([#1492](https://github.com/aws/aws-sdk-go/pull/1492)) + +Release v1.10.32 (2017-08-25) +=== + +### Service Client Updates +* `service/rekognition`: Updates service API, documentation, and examples + * Update the enum value of LandmarkType and GenderType to be consistent with service response + +Release v1.10.31 (2017-08-23) +=== + +### Service Client Updates +* `service/appstream`: Updates service documentation + * Documentation updates for appstream +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.10.30 (2017-08-22) +=== + +### Service Client Updates +* `service/ssm`: Updates service API and documentation + * Changes to associations in Systems Manager State Manager can now be recorded. Previously, when you edited associations, you could not go back and review older association settings. Now, associations are versioned, and can be named using human-readable strings, allowing you to see a trail of association changes. You can also perform rate-based scheduling, which allows you to schedule associations more granularly. + +Release v1.10.29 (2017-08-21) +=== + +### Service Client Updates +* `service/firehose`: Updates service API, documentation, and paginators + * This change will allow customers to attach a Firehose delivery stream to an existing Kinesis stream directly. You no longer need a forwarder to move data from a Kinesis stream to a Firehose delivery stream. You can now run your streaming applications on your Kinesis stream and easily attach a Firehose delivery stream to it for data delivery to S3, Redshift, or Elasticsearch concurrently. +* `service/route53`: Updates service API and documentation + * Amazon Route 53 now supports CAA resource record type. A CAA record controls which certificate authorities are allowed to issue certificates for the domain or subdomain. 
+ +Release v1.10.28 (2017-08-18) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.10.27 (2017-08-16) +=== + +### Service Client Updates +* `service/gamelift`: Updates service API and documentation + * The Matchmaking Grouping Service is a new feature that groups player match requests for a given game together into game sessions based on developer configured rules. + +### SDK Enhancements +* `aws/arn`: Package for parsing and producing ARNs ([#1463](https://github.com/aws/aws-sdk-go/pull/1463)) + * Adds the `arn` package for AWS ARN parsing and building. Use this package to build AWS ARNs for services such as outlined in the [documentation](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + +### SDK Bugs +* `aws/signer/v4`: Correct V4 presign signature to include content sha256 in URL ([#1469](https://github.com/aws/aws-sdk-go/pull/1469)) + * Updates the V4 signer so that when a Presign is generated the `X-Amz-Content-Sha256` header is added to the query string instead of being required to be in the header. This allows you to generate presigned URLs for GET requests, e.g. S3.GetObject that do not require additional headers to be set by the downstream users of the presigned URL. + * Related To: [#1467](https://github.com/aws/aws-sdk-go/issues/1467) + +Release v1.10.26 (2017-08-15) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * Fixed bug in EC2 clients preventing HostReservation from being set +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+ +Release v1.10.25 (2017-08-14) +=== + +### Service Client Updates +* `service/AWS Glue`: Adds new service +* `service/batch`: Updates service API and documentation + * This release enhances the DescribeJobs API to include the CloudWatch logStreamName attribute in ContainerDetail and ContainerDetailAttempt +* `service/cloudhsmv2`: Adds new service + * CloudHSM provides hardware security modules for protecting sensitive data and cryptographic keys within an EC2 VPC, and enable the customer to maintain control over key access and use. This is a second-generation of the service that will improve security, lower cost and provide better customer usability. +* `service/elasticfilesystem`: Updates service API, documentation, and paginators + * Customers can create encrypted EFS file systems and specify a KMS master key to encrypt it with. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/mgh`: Adds new service + * AWS Migration Hub provides a single location to track migrations across multiple AWS and partner solutions. Using Migration Hub allows you to choose the AWS and partner migration tools that best fit your needs, while providing visibility into the status of your entire migration portfolio. Migration Hub also provides key metrics and progress for individual applications, regardless of which tools are being used to migrate them. For example, you might use AWS Database Migration Service, AWS Server Migration Service, and partner migration tools to migrate an application comprised of a database, virtualized web servers, and a bare metal server. Using Migration Hub will provide you with a single screen that shows the migration progress of all the resources in the application. This allows you to quickly get progress updates across all of your migrations, easily identify and troubleshoot any issues, and reduce the overall time and effort spent on your migration projects. Migration Hub is available to all AWS customers at no additional charge. 
You only pay for the cost of the migration tools you use, and any resources being consumed on AWS. +* `service/ssm`: Updates service API and documentation + * Systems Manager Maintenance Windows include the following changes or enhancements: New task options using Systems Manager Automation, AWS Lambda, and AWS Step Functions; enhanced ability to edit the targets of a Maintenance Window, including specifying a target name and description, and ability to edit the owner field; enhanced ability to edits tasks; enhanced support for Run Command parameters; and you can now use a --safe flag when attempting to deregister a target. If this flag is enabled when you attempt to deregister a target, the system returns an error if the target is referenced by any task. Also, Systems Manager now includes Configuration Compliance to scan your fleet of managed instances for patch compliance and configuration inconsistencies. You can collect and aggregate data from multiple AWS accounts and Regions, and then drill down into specific resources that aren't compliant. +* `service/storagegateway`: Updates service API and documentation + * Add optional field ForceDelete to DeleteFileShare api. + +Release v1.10.24 (2017-08-11) +=== + +### Service Client Updates +* `service/codedeploy`: Updates service API and documentation + * Adds support for specifying Application Load Balancers in deployment groups, for both in-place and blue/green deployments. 
+* `service/cognito-idp`: Updates service API and documentation +* `service/ec2`: Updates service API and documentation + * Provides customers an opportunity to recover an EIP that was released + Release v1.10.23 (2017-08-10) === diff --git a/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md index 7c0186f0c..6f422a95e 100644 --- a/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md +++ b/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md @@ -67,7 +67,7 @@ Please be aware of the following notes prior to opening a pull request: 5. The JSON files under the SDK's `models` folder are sourced from outside the SDK. Such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests directly on these models. If you discover an issue with the models please - create a Github [issue](issues) describing the issue. + create a [GitHub issue][issues] describing the issue. ### Testing diff --git a/vendor/github.com/aws/aws-sdk-go/Gopkg.lock b/vendor/github.com/aws/aws-sdk-go/Gopkg.lock new file mode 100644 index 000000000..854c94fdf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gopkg.lock @@ -0,0 +1,20 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/go-ini/ini" + packages = ["."] + revision = "300e940a926eb277d3901b20bdfcc54928ad3642" + version = "v1.25.4" + +[[projects]] + name = "github.com/jmespath/go-jmespath" + packages = ["."] + revision = "0b12d6b5" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "51a86a867df617990082dec6b868e4efe2fdb2ed0e02a3daa93cd30f962b5085" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/aws/aws-sdk-go/Gopkg.toml b/vendor/github.com/aws/aws-sdk-go/Gopkg.toml new file mode 100644 index 000000000..664fc5955 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gopkg.toml @@ -0,0 +1,48 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = [ + # Testing/Example/Codegen dependencies + "github.com/stretchr/testify", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/go-sql-driver/mysql", + "github.com/gucumber/gucumber", + "github.com/pkg/errors", + "golang.org/x/net", + "golang.org/x/net/html", + "golang.org/x/net/http2", + "golang.org/x/text", + "golang.org/x/text/html", + "golang.org/x/tools", + "golang.org/x/tools/go/loader", +] + + +[[constraint]] + name = "github.com/go-ini/ini" + version = "1.25.4" + +[[constraint]] + name = "github.com/jmespath/go-jmespath" + revision = "0b12d6b5" + #version = "0.2.2" diff --git a/vendor/github.com/aws/aws-sdk-go/Makefile b/vendor/github.com/aws/aws-sdk-go/Makefile index df9803c2a..83ccc1e62 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/Makefile +++ b/vendor/github.com/aws/aws-sdk-go/Makefile @@ -74,7 +74,7 @@ smoke-tests: get-deps-tests performance: get-deps-tests AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber -go-tags "integration" ./awstesting/performance -sandbox-tests: sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-go18 sandbox-test-gotip +sandbox-tests: sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-go18 sandbox-test-go19 sandbox-test-gotip sandbox-build-go15: docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" . @@ -111,6 +111,13 @@ sandbox-go18: sandbox-build-go18 sandbox-test-go18: sandbox-build-go18 docker run -t aws-sdk-go-1.8 +sandbox-build-go19: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.9 -t "aws-sdk-go-1.9" . +sandbox-go19: sandbox-build-go19 + docker run -i -t aws-sdk-go-1.9 bash +sandbox-test-go19: sandbox-build-go19 + docker run -t aws-sdk-go-1.9 + sandbox-build-gotip: @echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container" docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" . diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md index d5e048a5f..c32774491 100644 --- a/vendor/github.com/aws/aws-sdk-go/README.md +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -6,6 +6,8 @@ aws-sdk-go is the official AWS SDK for the Go programming language. Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK. +We [announced](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-2-0-developer-preview/) the Developer Preview for the [v2 AWS SDK for Go](https://github.com/aws/aws-sdk-go-v2). 
The v2 SDK is available at https://github.com/aws/aws-sdk-go-v2, and `go get github.com/aws/aws-sdk-go-v2` via `go get`. Check out the v2 SDK's [changes and updates](https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md), and let us know what you think. We want your feedback. + ## Installing If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder. @@ -185,7 +187,7 @@ Option's SharedConfigState parameter. })) ``` -[credentials_pkg]: ttps://docs.aws.amazon.com/sdk-for-go/api/aws/credentials +[credentials_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials ### Configuring AWS Region @@ -305,7 +307,7 @@ documentation for the errors that could be returned. // will leak connections. defer result.Body.Close() - fmt.Println("Object Size:", aws.StringValue(result.ContentLength)) + fmt.Println("Object Size:", aws.Int64Value(result.ContentLength)) ``` ### API Request Pagination and Resource Waiters diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index e25a460fb..63d2df67c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -2,6 +2,7 @@ package client import ( "math/rand" + "strconv" "sync" "time" @@ -38,14 +39,18 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + 
minTime) @@ -68,12 +73,49 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() + + return true +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } // lockedSource is a thread-safe implementation of rand.Source diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index ae3a28696..4fd0d0724 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -168,7 +168,7 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 07afe3b8e..2cb08182f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,6 +9,7 @@ package defaults import ( "fmt" + "net" "net/http" "net/url" "os" @@ -118,14 +119,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P return ec2RoleProvider(cfg, handlers) } +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string parsed, err := url.Parse(u) if err != nil { errMsg = fmt.Sprintf("invalid URL, %v", err) - } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { - errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } } if len(errMsg) > 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 74f72de07..6dc035a53 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io" + "os" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -84,11 +85,34 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol custAddEC2Metadata(p) custAddS3DualStack(p) custRmIotDataService(p) + + custFixCloudHSMv2SigningName(p) } return ps, nil } +func custFixCloudHSMv2SigningName(p *partition) { + // Workaround for aws/aws-sdk-go#1745 until the endpoint model can be + // fixed upstream. TODO remove this once the endpoints model is updated. + + s, ok := p.Services["cloudhsmv2"] + if !ok { + return + } + + if len(s.Defaults.CredentialScope.Service) != 0 { + fmt.Fprintf(os.Stderr, "cloudhsmv2 signing name already set, ignoring override.\n") + // If the value is already set don't override + return + } + + s.Defaults.CredentialScope.Service = "cloudhsm" + fmt.Fprintf(os.Stderr, "cloudhsmv2 signing name not set, overriding.\n") + + p.Services["cloudhsmv2"] = s +} + func custAddS3DualStack(p *partition) { if p.ID != "aws" { return diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 899087ec5..689c380b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -24,6 +24,7 @@ const ( EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -33,7 +34,8 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). 
) // AWS GovCloud (US) partition's regions. @@ -44,17 +46,20 @@ const ( // Service identifiers const ( AcmServiceID = "acm" // Acm. + ApiPricingServiceID = "api.pricing" // ApiPricing. ApigatewayServiceID = "apigateway" // Apigateway. ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. Appstream2ServiceID = "appstream2" // Appstream2. AthenaServiceID = "athena" // Athena. AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. BatchServiceID = "batch" // Batch. BudgetsServiceID = "budgets" // Budgets. ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. CloudformationServiceID = "cloudformation" // Cloudformation. CloudfrontServiceID = "cloudfront" // Cloudfront. CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. CloudsearchServiceID = "cloudsearch" // Cloudsearch. CloudtrailServiceID = "cloudtrail" // Cloudtrail. CodebuildServiceID = "codebuild" // Codebuild. @@ -68,6 +73,7 @@ const ( ConfigServiceID = "config" // Config. CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. DevicefarmServiceID = "devicefarm" // Devicefarm. DirectconnectServiceID = "directconnect" // Directconnect. DiscoveryServiceID = "discovery" // Discovery. @@ -91,6 +97,7 @@ const ( FirehoseServiceID = "firehose" // Firehose. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. GreengrassServiceID = "greengrass" // Greengrass. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. @@ -106,6 +113,7 @@ const ( MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. 
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. @@ -217,6 +225,9 @@ var awsPartition = partition{ "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -246,6 +257,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -253,6 +265,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -265,6 +288,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -290,6 +314,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -316,6 +341,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -336,6 +363,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -343,10 +371,27 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + 
"autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -393,6 +438,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -429,6 +475,26 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudsearch": service{ Endpoints: endpoints{ @@ -456,6 +522,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -467,6 +534,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -510,6 +578,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + 
"eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -521,6 +590,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -537,12 +608,16 @@ var awsPartition = partition{ "codestar": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -552,6 +627,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -567,6 +643,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -582,6 +659,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -603,6 +681,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -626,6 +705,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "devicefarm": service{ Endpoints: endpoints{ @@ -644,6 +735,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -669,6 +761,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -681,14 +774,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -706,6 +802,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -734,6 +831,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -756,12 +854,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -772,12 +874,16 @@ var 
awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -796,6 +902,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -815,6 +922,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -826,6 +934,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -834,7 +943,7 @@ var awsPartition = partition{ }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -846,6 +955,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -870,6 +980,7 @@ var awsPartition = partition{ }, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -922,6 +1033,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, @@ -941,6 +1053,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -951,9 +1064,15 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -963,10 +1082,15 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -978,23 +1102,36 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "greengrass": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "us-east-1": 
endpoint{}, @@ -1042,6 +1179,7 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-1": endpoint{}, @@ -1079,6 +1217,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1106,6 +1245,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1125,6 +1265,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1159,6 +1300,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1202,6 +1344,12 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: endpoints{ @@ -1232,6 +1380,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1260,6 +1409,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1301,10 +1451,21 @@ var awsPartition = partition{ "polly": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rds": service{ @@ -1319,6 +1480,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -1340,6 +1502,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1352,6 +1515,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1381,6 +1545,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, }, }, @@ -1396,26 +1561,27 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{ - Hostname: "s3-ap-northeast-1.amazonaws.com", + Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", + Hostname: "s3.ap-southeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", + Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ca-central-1": endpoint{}, "eu-central-1": 
endpoint{}, "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", + Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -1424,7 +1590,7 @@ var awsPartition = partition{ }, }, "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", + Hostname: "s3.sa-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-1": endpoint{ @@ -1433,11 +1599,11 @@ var awsPartition = partition{ }, "us-east-2": endpoint{}, "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", + Hostname: "s3.us-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", + Hostname: "s3.us-west-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, }, @@ -1464,14 +1630,19 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1495,19 +1666,24 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": 
endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1528,6 +1704,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1550,6 +1727,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", @@ -1571,6 +1749,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1582,9 +1761,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1602,6 +1784,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1626,6 +1809,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1664,6 +1848,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -1713,6 +1898,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": 
endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1756,8 +1942,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1780,6 +1969,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1830,30 +2020,62 @@ var awscnPartition = partition{ "cn-north-1": region{ Description: "China (Beijing)", }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, }, Services: services{ + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "codedeploy": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1861,13 +2083,15 @@ var awscnPartition = partition{ "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": 
endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "dynamodb": service{ @@ -1875,7 +2099,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2": service{ @@ -1883,7 +2108,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2metadata": service{ @@ -1912,21 +2138,24 @@ var awscnPartition = partition{ "elasticache": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticmapreduce": service{ @@ -1934,13 +2163,21 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, }, }, "events": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "glacier": service{ @@ -1948,7 +2185,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "iam": service{ @@ -1964,8 +2202,25 @@ var 
awscnPartition = partition{ }, }, }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "kinesis": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1973,7 +2228,8 @@ var awscnPartition = partition{ "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "monitoring": service{ @@ -1981,19 +2237,22 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "s3": service{ @@ -2001,6 +2260,19 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "snowball": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -2010,7 +2282,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sqs": service{ @@ -2019,13 +2292,15 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ssm": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + 
"cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "storagegateway": service{ @@ -2042,19 +2317,22 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "swf": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "tagging": service{ @@ -2092,6 +2370,18 @@ var awsusgovPartition = partition{ }, }, Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "autoscaling": service{ Endpoints: endpoints{ @@ -2136,10 +2426,22 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "dynamodb": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ec2": service{ @@ -2159,12 +2461,24 @@ var awsusgovPartition = partition{ }, }, }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -2268,7 +2582,7 @@ var awsusgovPartition = partition{ }, }, "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", + Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, @@ -2316,6 +2630,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ 
"us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sts": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 088ba5290..5c7db4982 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -28,6 +28,10 @@ const ( // during body reads. ErrCodeResponseTimeout = "ResponseTimeout" + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + // CanceledErrorCode is the error code that will be returned by an // API request that was canceled. Requests given a aws.Context may // return this error when canceled. @@ -42,7 +46,6 @@ type Request struct { Retryer Time time.Time - ExpireTime time.Duration Operation *Operation HTTPRequest *http.Request HTTPResponse *http.Response @@ -60,6 +63,11 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + context aws.Context built bool @@ -104,6 +112,8 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -250,34 +260,59 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) { // Presign returns the request's signed URL. Error will be returned // if the signing fails. 
-func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. r.NotHoist = false + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. 
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err } } - r.Sign() - if r.Error != nil { - return "", r.Error + if err := r.Sign(); err != nil { + return "", nil, err } - return r.HTTPRequest.URL.String(), nil -} -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } @@ -573,3 +608,72 @@ func shouldRetryCancel(r *Request) bool { errStr != "net/http: request canceled while waiting for connection") } + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. 
+// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 59de6736b..159518a75 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -142,13 +142,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue } + v := vs[0] + + switch tv := v.(type) { + case 
*string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 4b102f8f2..f1adcf481 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -7,6 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" ) +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional. But some values // such as credentials require multiple values to be complete or the values @@ -157,7 +160,7 @@ func envConfigLoad(enableSharedConfig bool) envConfig { if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index d68905acb..ccc88b4ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -268,7 +268,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty. 
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) + return v4.signWithBody(r, body, service, region, 0, false, signTime) } // Presign signs AWS v4 requests with the provided body, service name, region @@ -302,10 +302,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin // presigned request's signature you can set the "X-Amz-Content-Sha256" // HTTP header and that will be included in the request's signature. func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) + return v4.signWithBody(r, body, service, region, exp, true, signTime) } -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { currentTimeFn := v4.currentTimeFn if currentTimeFn == nil { currentTimeFn = time.Now @@ -317,7 +317,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi Query: r.URL.Query(), Time: signTime, ExpireTime: exp, - isPresign: exp != 0, + isPresign: isPresign, ServiceName: service, Region: region, DisableURIPathEscaping: v4.DisableURIPathEscaping, @@ -339,6 +339,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return http.Header{}, err } + ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() ctx.build(v4.DisableHeaderHoisting) @@ -363,6 +364,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return ctx.SignedHeaderVals, nil } +func (ctx *signingCtx) sanitizeHostForHeader() { + 
request.SanitizeHostForHeader(ctx.Request) +} + func (ctx *signingCtx) handlePresignRemoval() { if !ctx.isPresign { return @@ -467,7 +472,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, ) if err != nil { req.Error = err @@ -502,6 +507,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + ctx.buildBodyDigest() + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -513,7 +520,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 7a4f26e39..b55286b31 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.10.23" +const SDKVersion = "1.12.71" diff --git a/vendor/github.com/aws/aws-sdk-go/buildspec.yml b/vendor/github.com/aws/aws-sdk-go/buildspec.yml new file mode 100644 index 000000000..427208edf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/buildspec.yml @@ -0,0 +1,21 @@ +version: 0.2 + +phases: + build: + commands: + - echo Build started on `date` + - export GOPATH=/go + - export SDK_CB_ROOT=`pwd` + - export SDK_GO_ROOT=/go/src/github.com/aws/aws-sdk-go + - mkdir -p /go/src/github.com/aws + - ln -s $SDK_CB_ROOT $SDK_GO_ROOT + - cd $SDK_GO_ROOT + - make unit + - cd $SDK_CB_ROOT + - #echo Compiling the Go code... 
+ post_build: + commands: + - echo Build completed on `date` +#artifacts: +# files: +# - hello diff --git a/vendor/github.com/aws/aws-sdk-go/doc.go b/vendor/github.com/aws/aws-sdk-go/doc.go index 3e077e51d..32b806a4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/doc.go @@ -197,7 +197,7 @@ // regions different from the Session's region. // // svc := s3.New(sess, &aws.Config{ -// Region: aws.String(ednpoints.UsWest2RegionID), +// Region: aws.String(endpoints.UsWest2RegionID), // }) // // See the Config type in the aws package for more information and additional diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 6efe43d5f..ec765ba25 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -12,6 +12,7 @@ import ( "strconv" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" ) @@ -49,7 +50,10 @@ func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err t = "list" } case reflect.Map: - t = "map" + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } } } @@ -210,14 +214,11 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro } buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: - switch value.Type() { - case timeType: - converted := v.Interface().(*time.Time) - + switch converted := value.Interface().(type) { + case time.Time: buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10)) - case byteSliceType: + case []byte: if !value.IsNil() { - converted := value.Interface().([]byte) buf.WriteByte('"') if len(converted) < 1024 { // for small buffers, using Encode directly is much faster. 
@@ -233,6 +234,12 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro } buf.WriteByte('"') } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) default: return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index fea535613..037e1e7be 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -8,6 +8,9 @@ import ( "io/ioutil" "reflect" "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalJSON reads a stream and unmarshals the results in object v. @@ -50,7 +53,10 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) t = "list" } case reflect.Map: - t = "map" + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } } } @@ -183,6 +189,13 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa return err } value.Set(reflect.ValueOf(b)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) default: return errf() } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 000000000..776d11018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. 
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 524ca952a..5ce9cba32 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { if listName := tag.Get("locationNameList"); listName == "" { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 716183564..c405288d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -4,7 +4,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "net/http" @@ -18,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // RFC822 returns an RFC822 formatted timestamp for AWS protocols @@ -252,13 +252,12 @@ func 
EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -273,17 +272,19 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { case time.Time: str = value.UTC().Format(RFC822) case aws.JSONValue: - b, err := json.Marshal(value) - if err != nil { - return "", err + if len(value) == 0 { + return "", errValueNotSet } + escaping := protocol.NoEscape if tag.Get("location") == "header" { - str = base64.StdEncoding.EncodeToString(b) - } else { - str = string(b) + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 7a779ee22..823f045ee 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -3,7 +3,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "io/ioutil" @@ -16,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -204,17 +204,11 @@ func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) erro } v.Set(reflect.ValueOf(&t)) case aws.JSONValue: - b := []byte(header) - var err error + escaping := protocol.NoEscape if tag.Get("location") == "header" { - b, err = base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } + escaping = protocol.Base64Escape } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) + m, err := protocol.DecodeJSONValue(header, escaping) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 4598ccd82..1743b3449 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -5428,6 +5428,10 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // the requester VPC. The requester VPC and accepter VPC cannot have overlapping // CIDR blocks. // +// Limitations and rules apply to a VPC peering connection. For more information, +// see the limitations (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/vpc-peering-basics.html#vpc-peering-limitations) +// section in the VPC Peering Guide. +// // The owner of the accepter VPC must accept the peering request to activate // the peering connection. The VPC peering connection request expires after // 7 days, after which it cannot be accepted or rejected. @@ -18933,9 +18937,9 @@ func (c *EC2) ModifyVpcEndpointServicePermissionsRequest(input *ModifyVpcEndpoin // ModifyVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud. // -// Modifies the permissions for your VPC endpoint service. You can add or remove -// permissions for service consumers (IAM users, IAM roles, and AWS accounts) -// to discover your endpoint service. +// Modifies the permissions for your VPC endpoint service (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/endpoint-service.html). 
+// You can add or remove permissions for service consumers (IAM users, IAM roles, +// and AWS accounts) to connect to your endpoint service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -29784,7 +29788,7 @@ type CreateVpcEndpointInput struct { SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` // The service name. To get a list of available services, use the DescribeVpcEndpointServices - // request. + // request, or get the name from the service provider. // // ServiceName is a required field ServiceName *string `type:"string" required:"true"` @@ -33150,6 +33154,18 @@ type DescribeAddressesInput struct { // the Elastic IP address. // // * public-ip - The Elastic IP address. + // + // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. + // + // * tag-key - The key of a tag assigned to the resource. This filter is + // independent of the tag-value filter. For example, if you use both the + // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources + // assigned both the tag key Purpose (regardless of what the tag's value + // is), and the tag value X (regardless of the tag's key). If you want to + // list only resources where Purpose is X, see the tag:key=value filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // [EC2-Classic] One or more Elastic IP addresses. @@ -36657,6 +36673,18 @@ type DescribeLaunchTemplatesInput struct { // * create-time - The time the launch template was created. // // * launch-template-name - The name of the launch template. 
+ // + // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. + // + // * tag-key - The key of a tag assigned to the resource. This filter is + // independent of the tag-value filter. For example, if you use both the + // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources + // assigned both the tag key Purpose (regardless of what the tag's value + // is), and the tag value X (regardless of the tag's key). If you want to + // list only resources where Purpose is X, see the tag:key=value filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more launch template IDs. @@ -40335,9 +40363,10 @@ type DescribeTagsInput struct { // * resource-id - The resource ID. // // * resource-type - The resource type (customer-gateway | dhcp-options | - // image | instance | internet-gateway | network-acl | network-interface - // | reserved-instances | route-table | security-group | snapshot | spot-instances-request - // | subnet | volume | vpc | vpn-connection | vpn-gateway). + // elastic-ip | fpga-image | image | instance | internet-gateway | launch-template + // | natgateway | network-acl | network-interface | reserved-instances | + // route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpc-peering-connection | vpn-connection | vpn-gateway). // // * value - The tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -49533,7 +49562,8 @@ func (s *InternetGateway) SetTags(v []*Tag) *InternetGateway { type InternetGatewayAttachment struct { _ struct{} `type:"structure"` - // The current state of the attachment. + // The current state of the attachment. 
For an Internet gateway, the state is + // available when attached to a VPC; otherwise, this value is not returned. State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` // The ID of the VPC. @@ -60763,7 +60793,9 @@ type RunInstancesInput struct { // The IAM instance profile. IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` - // The ID of the AMI, which you can get by calling DescribeImages. + // The ID of the AMI, which you can get by calling DescribeImages. An AMI is + // required to launch an instance and must be specified here or in a launch + // template. ImageId *string `type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -66149,13 +66181,19 @@ type UserIdGroupPair struct { // The name of the security group. In a request, use this parameter for a security // group in EC2-Classic or a default VPC only. For a security group in a nondefault // VPC, use the security group ID. + // + // For a referenced security group in another VPC, this value is not returned + // if the referenced security group is deleted. GroupName *string `locationName:"groupName" type:"string"` // The status of a VPC peering connection, if applicable. PeeringStatus *string `locationName:"peeringStatus" type:"string"` - // The ID of an AWS account. For a referenced security group in another VPC, - // the account ID of the referenced security group is returned. + // The ID of an AWS account. + // + // For a referenced security group in another VPC, the account ID of the referenced + // security group is returned in the response. If the referenced security group + // is deleted, this value is not returned. // // [EC2-Classic] Required when adding or removing rules that reference a security // group in another AWS account. 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index 1ba51125e..432a54df4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -4,9 +4,8 @@ // requests to Amazon Elastic Compute Cloud. // // Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity -// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your -// need to invest in hardware up front, so you can develop and deploy applications -// faster. +// in the AWS Cloud. Using Amazon EC2 eliminates your need to invest in hardware +// up front, so you can develop and deploy applications faster. // // See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index f9136852f..984aec965 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -35,7 +35,7 @@ const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { op := &request.Operation{ Name: opBatchCheckLayerAvailability, @@ -80,7 +80,7 @@ func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabil // * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { req, out := c.BatchCheckLayerAvailabilityRequest(input) return out, req.Send() @@ -127,7 +127,7 @@ const opBatchDeleteImage = "BatchDeleteImage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { op := &request.Operation{ Name: opBatchDeleteImage, @@ -175,7 +175,7 @@ func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *reques // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { req, out := c.BatchDeleteImageRequest(input) return out, req.Send() @@ -222,7 +222,7 @@ const opBatchGetImage = "BatchGetImage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { op := &request.Operation{ Name: opBatchGetImage, @@ -263,7 +263,7 @@ func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Requ // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { req, out := c.BatchGetImageRequest(input) return out, req.Send() @@ -310,7 +310,7 @@ const opCompleteLayerUpload = "CompleteLayerUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) { op := &request.Operation{ Name: opCompleteLayerUpload, @@ -329,9 +329,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // CompleteLayerUpload API operation for Amazon EC2 Container Registry. // -// Inform Amazon ECR that the image layer upload for a specified registry, repository -// name, and upload ID, has completed. You can optionally provide a sha256 digest -// of the image layer for data validation purposes. +// Informs Amazon ECR that the image layer upload has completed for a specified +// registry, repository name, and upload ID. You can optionally provide a sha256 +// digest of the image layer for data validation purposes. // // This operation is used by the Amazon ECR proxy, and it is not intended for // general use by customers for pulling and pushing images. In most cases, you @@ -373,7 +373,7 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // * ErrCodeEmptyUploadException "EmptyUploadException" // The specified layer upload does not contain any layer parts. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { req, out := c.CompleteLayerUploadRequest(input) return out, req.Send() @@ -420,7 +420,7 @@ const opCreateRepository = "CreateRepository" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { op := &request.Operation{ Name: opCreateRepository, @@ -465,7 +465,7 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon EC2 Container Registry User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) return out, req.Send() @@ -487,6 +487,96 @@ func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateReposito return out, req.Send() } +const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy" + +// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLifecyclePolicyRequest method. +// req, resp := client.DeleteLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opDeleteLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecyclePolicyInput{} + } + + output = &DeleteLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Deletes the specified lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DeleteLifecyclePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. 
+// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + return out, req.Send() +} + +// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteRepository = "DeleteRepository" // DeleteRepositoryRequest generates a "aws/request.Request" representing the @@ -512,7 +602,7 @@ const opDeleteRepository = "DeleteRepository" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { op := &request.Operation{ Name: opDeleteRepository, @@ -557,7 +647,7 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { req, out := c.DeleteRepositoryRequest(input) return out, req.Send() @@ -604,7 +694,7 @@ const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { op := &request.Operation{ Name: opDeleteRepositoryPolicy, @@ -648,7 +738,7 @@ func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) // The specified repository and registry combination does not have an associated // repository policy. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { req, out := c.DeleteRepositoryPolicyRequest(input) return out, req.Send() @@ -695,7 +785,7 @@ const opDescribeImages = "DescribeImages" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { op := &request.Operation{ Name: opDescribeImages, @@ -750,7 +840,7 @@ func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // * ErrCodeImageNotFoundException "ImageNotFoundException" // The image requested does not exist in the specified repository. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { req, out := c.DescribeImagesRequest(input) return out, req.Send() @@ -847,7 +937,7 @@ const opDescribeRepositories = "DescribeRepositories" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { op := &request.Operation{ Name: opDescribeRepositories, @@ -893,7 +983,7 @@ func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { req, out := c.DescribeRepositoriesRequest(input) return out, req.Send() @@ -990,7 +1080,7 @@ const opGetAuthorizationToken = "GetAuthorizationToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { op := &request.Operation{ Name: opGetAuthorizationToken, @@ -1033,7 +1123,7 @@ func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (r // The specified parameter is invalid. Review the available parameters for the // API request. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { req, out := c.GetAuthorizationTokenRequest(input) return out, req.Send() @@ -1080,7 +1170,7 @@ const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { op := &request.Operation{ Name: opGetDownloadUrlForLayer, @@ -1133,7 +1223,7 @@ func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { req, out := c.GetDownloadUrlForLayerRequest(input) return out, req.Send() @@ -1155,6 +1245,186 @@ func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownl return out, req.Send() } +const opGetLifecyclePolicy = "GetLifecyclePolicy" + +// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyRequest method. +// req, resp := client.GetLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLifecyclePolicyInput{} + } + + output = &GetLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Retrieves the specified lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. 
+// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview" + +// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyPreviewRequest method. +// req, resp := client.GetLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLifecyclePolicyPreviewInput{} + } + + output = &GetLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Retrieves the results of the specified lifecycle policy preview request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. 
+// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyPreviewNotFoundException "LifecyclePolicyPreviewNotFoundException" +// There is no dry run for this repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1180,7 +1450,7 @@ const opGetRepositoryPolicy = "GetRepositoryPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { op := &request.Operation{ Name: opGetRepositoryPolicy, @@ -1224,7 +1494,7 @@ func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req * // The specified repository and registry combination does not have an associated // repository policy. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { req, out := c.GetRepositoryPolicyRequest(input) return out, req.Send() @@ -1271,7 +1541,7 @@ const opInitiateLayerUpload = "InitiateLayerUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { op := &request.Operation{ Name: opInitiateLayerUpload, @@ -1315,7 +1585,7 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { req, out := c.InitiateLayerUploadRequest(input) return out, req.Send() @@ -1362,7 +1632,7 @@ const opListImages = "ListImages" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { op := &request.Operation{ Name: opListImages, @@ -1414,7 +1684,7 @@ func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, o // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { req, out := c.ListImagesRequest(input) return out, req.Send() @@ -1511,7 +1781,7 @@ const opPutImage = "PutImage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { op := &request.Operation{ Name: opPutImage, @@ -1556,8 +1826,8 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // repository and ensure that you are performing operations on the correct registry. 
// // * ErrCodeImageAlreadyExistsException "ImageAlreadyExistsException" -// The specified image has already been pushed, and there are no changes to -// the manifest or image tag since the last push. +// The specified image has already been pushed, and there were no changes to +// the manifest or image tag after the last push. // // * ErrCodeLayersNotFoundException "LayersNotFoundException" // The specified layers could not be found, or the specified layer is not valid @@ -1569,7 +1839,7 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon EC2 Container Registry User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { req, out := c.PutImageRequest(input) return out, req.Send() @@ -1591,6 +1861,93 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } +const opPutLifecyclePolicy = "PutLifecyclePolicy" + +// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLifecyclePolicyRequest method. 
+// req, resp := client.PutLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opPutLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLifecyclePolicyInput{} + } + + output = &PutLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Creates or updates a lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutLifecyclePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) + return out, req.Send() +} + +// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSetRepositoryPolicy = "SetRepositoryPolicy" // SetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1616,7 +1973,7 @@ const opSetRepositoryPolicy = "SetRepositoryPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { op := &request.Operation{ Name: opSetRepositoryPolicy, @@ -1656,7 +2013,7 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { req, out := c.SetRepositoryPolicyRequest(input) return out, req.Send() @@ -1678,6 +2035,101 @@ func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetReposito return out, req.Send() } +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. 
+// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. 
+// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUploadLayerPart = "UploadLayerPart" // UploadLayerPartRequest generates a "aws/request.Request" representing the @@ -1703,7 +2155,7 @@ const opUploadLayerPart = "UploadLayerPart" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { op := &request.Operation{ Name: opUploadLayerPart, @@ -1761,7 +2213,7 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon EC2 Container Registry User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { req, out := c.UploadLayerPartRequest(input) return out, req.Send() @@ -1784,7 +2236,7 @@ func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPart } // An object representing authorization data for an Amazon ECR registry. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/AuthorizationData +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/AuthorizationData type AuthorizationData struct { _ struct{} `type:"structure"` @@ -1831,7 +2283,7 @@ func (s *AuthorizationData) SetProxyEndpoint(v string) *AuthorizationData { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailabilityRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailabilityRequest type BatchCheckLayerAvailabilityInput struct { _ struct{} `type:"structure"` @@ -1900,7 +2352,7 @@ func (s *BatchCheckLayerAvailabilityInput) SetRepositoryName(v string) *BatchChe return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailabilityResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailabilityResponse type BatchCheckLayerAvailabilityOutput struct { _ struct{} `type:"structure"` @@ -1936,7 +2388,7 @@ func (s *BatchCheckLayerAvailabilityOutput) SetLayers(v []*Layer) *BatchCheckLay // Deletes specified images within a specified repository. Images are specified // with either the imageTag or imageDigest. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImageRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImageRequest type BatchDeleteImageInput struct { _ struct{} `type:"structure"` @@ -2006,7 +2458,7 @@ func (s *BatchDeleteImageInput) SetRepositoryName(v string) *BatchDeleteImageInp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImageResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImageResponse type BatchDeleteImageOutput struct { _ struct{} `type:"structure"` @@ -2039,7 +2491,7 @@ func (s *BatchDeleteImageOutput) SetImageIds(v []*ImageIdentifier) *BatchDeleteI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImageRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImageRequest type BatchGetImageInput struct { _ struct{} `type:"structure"` @@ -2124,7 +2576,7 @@ func (s *BatchGetImageInput) SetRepositoryName(v string) *BatchGetImageInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImageResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImageResponse type BatchGetImageOutput struct { _ struct{} `type:"structure"` @@ -2157,7 +2609,7 @@ func (s *BatchGetImageOutput) SetImages(v []*Image) *BatchGetImageOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUploadRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUploadRequest type CompleteLayerUploadInput struct { _ struct{} `type:"structure"` @@ -2241,7 +2693,7 @@ func (s *CompleteLayerUploadInput) SetUploadId(v string) *CompleteLayerUploadInp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUploadResponse +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUploadResponse type CompleteLayerUploadOutput struct { _ struct{} `type:"structure"` @@ -2292,7 +2744,7 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepositoryRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepositoryRequest type CreateRepositoryInput struct { _ struct{} `type:"structure"` @@ -2336,7 +2788,7 @@ func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepositoryResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepositoryResponse type CreateRepositoryOutput struct { _ struct{} `type:"structure"` @@ -2360,11 +2812,115 @@ func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyRequest +type DeleteLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to
 delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyResponse +type DeleteLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryRequest type DeleteRepositoryInput struct { _ struct{} `type:"structure"` - // Force the deletion of the repository if it contains images. + // If a repository contains images, forces the deletion. 
Force *bool `locationName:"force" type:"boolean"` // The AWS account ID associated with the registry that contains the repository @@ -2421,7 +2977,7 @@ func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryResponse type DeleteRepositoryOutput struct { _ struct{} `type:"structure"` @@ -2445,7 +3001,7 @@ func (s *DeleteRepositoryOutput) SetRepository(v *Repository) *DeleteRepositoryO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicyRequest type DeleteRepositoryPolicyInput struct { _ struct{} `type:"structure"` @@ -2499,7 +3055,7 @@ func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteReposit return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicyResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicyResponse type DeleteRepositoryPolicyOutput struct { _ struct{} `type:"structure"` @@ -2542,7 +3098,7 @@ func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteReposi } // An object representing a filter on a DescribeImages operation. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesFilter type DescribeImagesFilter struct { _ struct{} `type:"structure"` @@ -2567,7 +3123,7 @@ func (s *DescribeImagesFilter) SetTagStatus(v string) *DescribeImagesFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesRequest type DescribeImagesInput struct { _ struct{} `type:"structure"` @@ -2672,7 +3228,7 @@ func (s *DescribeImagesInput) SetRepositoryName(v string) *DescribeImagesInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImagesResponse type DescribeImagesOutput struct { _ struct{} `type:"structure"` @@ -2708,7 +3264,7 @@ func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositoriesRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositoriesRequest type DescribeRepositoriesInput struct { _ struct{} `type:"structure"` @@ -2791,7 +3347,7 @@ func (s *DescribeRepositoriesInput) SetRepositoryNames(v []*string) *DescribeRep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositoriesResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositoriesResponse type DescribeRepositoriesOutput struct { _ struct{} `type:"structure"` @@ -2827,7 +3383,7 @@ func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeR return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationTokenRequest +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationTokenRequest type GetAuthorizationTokenInput struct { _ struct{} `type:"structure"` @@ -2866,7 +3422,7 @@ func (s *GetAuthorizationTokenInput) SetRegistryIds(v []*string) *GetAuthorizati return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationTokenResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationTokenResponse type GetAuthorizationTokenOutput struct { _ struct{} `type:"structure"` @@ -2891,7 +3447,7 @@ func (s *GetAuthorizationTokenOutput) SetAuthorizationData(v []*AuthorizationDat return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayerRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayerRequest type GetDownloadUrlForLayerInput struct { _ struct{} `type:"structure"` @@ -2957,7 +3513,7 @@ func (s *GetDownloadUrlForLayerInput) SetRepositoryName(v string) *GetDownloadUr return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayerResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayerResponse type GetDownloadUrlForLayerOutput struct { _ struct{} `type:"structure"` @@ -2990,7 +3546,297 @@ func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlF return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyRequest +type GetLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyResponse +type GetLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewRequest +type GetLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that filters results based on image tag status and + // all tags, if tagged. + Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"` + + // The list of imageIDs to be included. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest + // in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest + // only returns
 maxResults results in a single page along with a nextToken + // response element. The remaining results of the initial request can be seen + // by sending
 another GetLifecyclePolicyPreviewRequest request with the returned + // nextToken
 value. This value can be between 1 and 100. If this
 parameter + // is not used, then GetLifecyclePolicyPreviewRequest returns up to
 100 results + // and a nextToken value, if
 applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated
 GetLifecyclePolicyPreviewRequest + // request where maxResults was used and the
 results exceeded the value of + // that parameter. Pagination continues from the end of the
 previous results + // that returned the nextToken value. This value is
 null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"} + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput { + s.Filter = v + return s +} + +// SetImageIds sets the ImageIds field's value. 
+func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput { + s.ImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewResponse +type GetLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The nextToken value to include in a future GetLifecyclePolicyPreview request. + // When the results of a GetLifecyclePolicyPreview request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The results of the lifecycle policy preview request. + PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` + + // The list of images that is returned as a result of the action. + Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput { + s.NextToken = &v + return s +} + +// SetPreviewResults sets the PreviewResults field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput { + s.PreviewResults = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// SetSummary sets the Summary field's value. 
+func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput { + s.Summary = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest type GetRepositoryPolicyInput struct { _ struct{} `type:"structure"` @@ -2998,7 +3844,7 @@ type GetRepositoryPolicyInput struct { // If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository whose policy you want to retrieve. + // The name of the repository with the policy to retrieve. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3042,7 +3888,7 @@ func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPol return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse type GetRepositoryPolicyOutput struct { _ struct{} `type:"structure"` @@ -3085,7 +3931,7 @@ func (s *GetRepositoryPolicyOutput) SetRepositoryName(v string) *GetRepositoryPo } // An object representing an Amazon ECR image. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Image +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Image type Image struct { _ struct{} `type:"structure"` @@ -3137,7 +3983,7 @@ func (s *Image) SetRepositoryName(v string) *Image { } // An object that describes an image returned by a DescribeImages operation. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageDetail +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageDetail type ImageDetail struct { _ struct{} `type:"structure"` @@ -3213,7 +4059,7 @@ func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { } // An object representing an Amazon ECR image failure. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageFailure +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageFailure type ImageFailure struct { _ struct{} `type:"structure"` @@ -3256,7 +4102,7 @@ func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure { } // An object with identifying information for an Amazon ECR image. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageIdentifier +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ImageIdentifier type ImageIdentifier struct { _ struct{} `type:"structure"` @@ -3289,15 +4135,15 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUploadRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUploadRequest type InitiateLayerUploadInput struct { _ struct{} `type:"structure"` - // The AWS account ID associated with the registry that you intend to upload - // layers to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you intend to upload + // layers. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you intend to upload layers to. + // The name of the repository to which you intend to upload layers. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3341,7 +4187,7 @@ func (s *InitiateLayerUploadInput) SetRepositoryName(v string) *InitiateLayerUpl return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUploadResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUploadResponse type InitiateLayerUploadOutput struct { _ struct{} `type:"structure"` @@ -3377,7 +4223,7 @@ func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOu } // An object representing an Amazon ECR image layer. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Layer +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Layer type Layer struct { _ struct{} `type:"structure"` @@ -3430,7 +4276,7 @@ func (s *Layer) SetMediaType(v string) *Layer { } // An object representing an Amazon ECR image layer failure. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LayerFailure +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LayerFailure type LayerFailure struct { _ struct{} `type:"structure"` @@ -3472,8 +4318,145 @@ func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { return s } +// The filter for the lifecycle policy preview. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewFilter +type LifecyclePolicyPreviewFilter struct { + _ struct{} `type:"structure"` + + // The tag status of the image. 
+ TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. +func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter { + s.TagStatus = &v + return s +} + +// The result of the lifecycle policy preview. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewResult +type LifecyclePolicyPreviewResult struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. + Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"` + + // The priority of the applied rule. + AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The date and time, expressed in standard JavaScript date format, at which + // the current image was pushed to the repository. + ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp" timestampFormat:"unix"` + + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewResult) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult { + s.Action = v + return s +} + +// SetAppliedRulePriority sets the AppliedRulePriority field's value. 
+func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult { + s.AppliedRulePriority = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. +func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult { + s.ImagePushedAt = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult { + s.ImageTags = v + return s +} + +// The summary of the lifecycle policy preview request. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewSummary +type LifecyclePolicyPreviewSummary struct { + _ struct{} `type:"structure"` + + // The number of expiring images. + ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewSummary) GoString() string { + return s.String() +} + +// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value. +func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary { + s.ExpiringImageTotalCount = &v + return s +} + +// The type of action to be taken. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyRuleAction +type LifecyclePolicyRuleAction struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. 
+ Type *string `locationName:"type" type:"string" enum:"ImageActionType"` +} + +// String returns the string representation +func (s LifecyclePolicyRuleAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyRuleAction) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction { + s.Type = &v + return s +} + // An object representing a filter on a ListImages operation. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesFilter type ListImagesFilter struct { _ struct{} `type:"structure"` @@ -3498,7 +4481,7 @@ func (s *ListImagesFilter) SetTagStatus(v string) *ListImagesFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesRequest type ListImagesInput struct { _ struct{} `type:"structure"` @@ -3524,11 +4507,11 @@ type ListImagesInput struct { NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repository - // to list images in. If you do not specify a registry, the default registry + // in which to list images. If you do not specify a registry, the default registry // is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The repository whose image IDs are to be listed. + // The repository with image IDs to be listed. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3593,7 +4576,7 @@ func (s *ListImagesInput) SetRepositoryName(v string) *ListImagesInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesResponse type ListImagesOutput struct { _ struct{} `type:"structure"` @@ -3629,7 +4612,7 @@ func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageRequest type PutImageInput struct { _ struct{} `type:"structure"` @@ -3706,7 +4689,7 @@ func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageResponse type PutImageOutput struct { _ struct{} `type:"structure"` @@ -3730,28 +4713,138 @@ func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { return s } +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyRequest +type PutLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text to apply to the repository. + // + // LifecyclePolicyText is a required field + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do
 not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"} + if s.LifecyclePolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText")) + } + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
+func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyResponse +type PutLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + // An object representing a repository. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Repository +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Repository type Repository struct { _ struct{} `type:"structure"` - // The date and time, in JavaScript date/time format, when the repository was - // created. + // The date and time, in JavaScript date format, when the repository was created. 
CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The AWS account ID associated with the registry that contains the repository. RegistryId *string `locationName:"registryId" type:"string"` // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, the - // AWS account ID of the repository owner, the repository namespace, and then - // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + // the arn:aws:ecr namespace, followed by the region of the repository, AWS + // account ID of the repository owner, repository namespace, and repository + // name. For example, arn:aws:ecr:region:012345678910:repository/test. RepositoryArn *string `locationName:"repositoryArn" type:"string"` // The name of the repository. RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` - // The URI for the repository. You can use this URI for Docker push and pull + // The URI for the repository. You can use this URI for Docker push or pull // operations. 
RepositoryUri *string `locationName:"repositoryUri" type:"string"` } @@ -3796,7 +4889,7 @@ func (s *Repository) SetRepositoryUri(v string) *Repository { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicyRequest type SetRepositoryPolicyInput struct { _ struct{} `type:"structure"` @@ -3873,7 +4966,7 @@ func (s *SetRepositoryPolicyInput) SetRepositoryName(v string) *SetRepositoryPol return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicyResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicyResponse type SetRepositoryPolicyOutput struct { _ struct{} `type:"structure"` @@ -3915,7 +5008,123 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewRequest +type StartLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // The policy to be evaluated against. If you do not specify a policy, the current + // policy for the repository is used. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to be evaluated. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"} + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewResponse +type StartLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. 
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartRequest type UploadLayerPartInput struct { _ struct{} `type:"structure"` @@ -3936,11 +5145,11 @@ type UploadLayerPartInput struct { // PartLastByte is a required field PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` - // The AWS account ID associated with the registry that you are uploading layer - // parts to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you are uploading + // layer parts. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you are uploading layer parts to. + // The name of the repository to which you are uploading layer parts. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -4026,7 +5235,7 @@ func (s *UploadLayerPartInput) SetUploadId(v string) *UploadLayerPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartResponse type UploadLayerPartOutput struct { _ struct{} `type:"structure"` @@ -4077,6 +5286,11 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // ImageActionTypeExpire is a ImageActionType enum value + ImageActionTypeExpire = "EXPIRE" +) + const ( // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" @@ -4110,6 +5324,20 @@ const ( LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) +const ( + // 
LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" + + // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusComplete = "COMPLETE" + + // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusExpired = "EXPIRED" + + // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go index 987aa72fa..a2aea0f6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -3,11 +3,11 @@ // Package ecr provides the client and types for making API // requests to Amazon EC2 Container Registry. // -// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions -// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// Amazon EC2 Container Registry (Amazon ECR) is a managed Docker registry service. +// Customers can use the familiar Docker CLI to push, pull, and manage images. +// Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR +// supports private Docker repositories with resource-based permissions using +// IAM so that specific users or Amazon EC2 instances can access repositories // and images. Developers can use the Docker CLI to author and manage images. 
// // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. @@ -17,7 +17,7 @@ // // Using the Client // -// To Amazon EC2 Container Registry with the SDK use the New function to create +// To contact Amazon EC2 Container Registry with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 4399e6a29..2a2caaedd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -13,8 +13,8 @@ const ( // ErrCodeImageAlreadyExistsException for service response error code // "ImageAlreadyExistsException". // - // The specified image has already been pushed, and there are no changes to - // the manifest or image tag since the last push. + // The specified image has already been pushed, and there were no changes to + // the manifest or image tag after the last push. ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" // ErrCodeImageNotFoundException for service response error code @@ -70,6 +70,25 @@ const ( // for this repository. ErrCodeLayersNotFoundException = "LayersNotFoundException" + // ErrCodeLifecyclePolicyNotFoundException for service response error code + // "LifecyclePolicyNotFoundException". + // + // The lifecycle policy could not be found, and no policy is set to the repository. + ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException" + + // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code + // "LifecyclePolicyPreviewInProgressException". + // + // The previous lifecycle policy preview request has not completed. Please try + // again later. 
+ ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" + + // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code + // "LifecyclePolicyPreviewNotFoundException". + // + // There is no dry run for this repository. + ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index e65ef266f..0d852f59c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -39,7 +39,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -75,7 +75,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // * ErrCodeNoSuchUpload "NoSuchUpload" // The specified multipart upload does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) return out, req.Send() @@ -122,7 +122,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -149,7 +149,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CompleteMultipartUpload for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) return out, req.Send() @@ -196,7 +196,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -229,7 +229,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) return out, req.Send() @@ -276,7 +276,7 @@ const opCreateBucket = "CreateBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -311,7 +311,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) return out, req.Send() @@ -358,7 +358,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -391,7 +391,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CreateMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) return out, req.Send() @@ -438,7 +438,7 @@ const opDeleteBucket = "DeleteBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -468,7 +468,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucket for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) return out, req.Send() @@ -515,7 +515,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketAnalyticsConfiguration, @@ -545,7 +545,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketAnalyticsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -592,7 +592,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -621,7 +621,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { req, out := c.DeleteBucketCorsRequest(input) return out, req.Send() @@ -643,6 +643,82 @@ func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCor return out, req.Send() } +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -668,7 +744,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketInventoryConfiguration, @@ -698,7 +774,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketInventoryConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { req, out := c.DeleteBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -745,7 +821,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -774,7 +850,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketLifecycle for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { req, out := c.DeleteBucketLifecycleRequest(input) return out, req.Send() @@ -821,7 +897,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketMetricsConfiguration, @@ -851,7 +927,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketMetricsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { req, out := c.DeleteBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -898,7 +974,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -927,7 +1003,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketPolicy for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { req, out := c.DeleteBucketPolicyRequest(input) return out, req.Send() @@ -974,7 +1050,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -1003,7 +1079,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketReplication for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { req, out := c.DeleteBucketReplicationRequest(input) return out, req.Send() @@ -1050,7 +1126,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -1079,7 +1155,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { req, out := c.DeleteBucketTaggingRequest(input) return out, req.Send() @@ -1126,7 +1202,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -1155,7 +1231,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketWebsite for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { req, out := c.DeleteBucketWebsiteRequest(input) return out, req.Send() @@ -1202,7 +1278,7 @@ const opDeleteObject = "DeleteObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -1231,7 +1307,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObject for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { req, out := c.DeleteObjectRequest(input) return out, req.Send() @@ -1278,7 +1354,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { op := &request.Operation{ Name: opDeleteObjectTagging, @@ -1305,7 +1381,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjectTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { req, out := c.DeleteObjectTaggingRequest(input) return out, req.Send() @@ -1352,7 +1428,7 @@ const opDeleteObjects = "DeleteObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -1380,7 +1456,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjects for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { req, out := c.DeleteObjectsRequest(input) return out, req.Send() @@ -1427,7 +1503,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAccelerateConfiguration, @@ -1454,7 +1530,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAccelerateConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { req, out := c.GetBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -1501,7 +1577,7 @@ const opGetBucketAcl = "GetBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -1528,7 +1604,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAcl for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { req, out := c.GetBucketAclRequest(input) return out, req.Send() @@ -1575,7 +1651,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAnalyticsConfiguration, @@ -1603,7 +1679,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAnalyticsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { req, out := c.GetBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -1650,7 +1726,7 @@ const opGetBucketCors = "GetBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -1677,7 +1753,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { req, out := c.GetBucketCorsRequest(input) return out, req.Send() @@ -1699,6 +1775,80 @@ func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput return out, req.Send() } +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -1724,7 +1874,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opGetBucketInventoryConfiguration, @@ -1752,7 +1902,7 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketInventoryConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -1799,7 +1949,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1829,7 +1979,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) return out, req.Send() @@ -1876,7 +2026,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -1903,7 +2053,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -1950,7 +2100,7 @@ const opGetBucketLocation = "GetBucketLocation" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -1977,7 +2127,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLocation for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) return out, req.Send() @@ -2024,7 +2174,7 @@ const opGetBucketLogging = "GetBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -2052,7 +2202,7 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLogging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) return out, req.Send() @@ -2099,7 +2249,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketMetricsConfiguration, @@ -2127,7 +2277,7 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketMetricsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -2174,7 +2324,7 @@ const opGetBucketNotification = "GetBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -2204,7 +2354,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) return out, req.Send() @@ -2251,7 +2401,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -2278,7 +2428,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotificationConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -2325,7 +2475,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -2352,7 +2502,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketPolicy for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) return out, req.Send() @@ -2399,7 +2549,7 @@ const opGetBucketReplication = "GetBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -2426,7 +2576,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketReplication for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) return out, req.Send() @@ -2473,7 +2623,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -2500,7 +2650,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketRequestPayment for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) return out, req.Send() @@ -2547,7 +2697,7 @@ const opGetBucketTagging = "GetBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -2574,7 +2724,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) return out, req.Send() @@ -2621,7 +2771,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -2648,7 +2798,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketVersioning for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) return out, req.Send() @@ -2695,7 +2845,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -2722,7 +2872,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) return out, req.Send() @@ -2769,7 +2919,7 @@ const opGetObject = "GetObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -2801,7 +2951,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) return out, req.Send() @@ -2848,7 +2998,7 @@ const opGetObjectAcl = "GetObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -2880,7 +3030,7 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) return out, req.Send() @@ -2927,7 +3077,7 @@ const opGetObjectTagging = "GetObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ Name: opGetObjectTagging, @@ -2954,7 +3104,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { req, out := c.GetObjectTaggingRequest(input) return out, req.Send() @@ -3001,7 +3151,7 @@ const opGetObjectTorrent = "GetObjectTorrent" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ Name: opGetObjectTorrent, @@ -3028,7 +3178,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTorrent for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { req, out := c.GetObjectTorrentRequest(input) return out, req.Send() @@ -3075,7 +3225,7 @@ const opHeadBucket = "HeadBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -3110,7 +3260,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { req, out := c.HeadBucketRequest(input) return out, req.Send() @@ -3157,7 +3307,7 @@ const opHeadObject = "HeadObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -3189,7 +3339,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation HeadObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { req, out := c.HeadObjectRequest(input) return out, req.Send() @@ -3236,7 +3386,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketAnalyticsConfigurations, @@ -3263,7 +3413,7 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // // See the AWS API reference guide for Amazon Simple Storage 
Service's // API operation ListBucketAnalyticsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) return out, req.Send() @@ -3310,7 +3460,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ Name: opListBucketInventoryConfigurations, @@ -3337,7 +3487,7 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketInventoryConfigurations for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) return out, req.Send() @@ -3384,7 +3534,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketMetricsConfigurations, @@ -3411,7 +3561,7 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketMetricsConfigurations for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) return out, req.Send() @@ -3458,7 +3608,7 @@ const opListBuckets = "ListBuckets" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -3485,7 +3635,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBuckets for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) return out, req.Send() @@ -3532,7 +3682,7 @@ const opListMultipartUploads = "ListMultipartUploads" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -3565,7 +3715,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListMultipartUploads for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) return out, req.Send() @@ -3662,7 +3812,7 @@ const opListObjectVersions = "ListObjectVersions" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -3695,7 +3845,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListObjectVersions for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) return out, req.Send() @@ -3792,7 +3942,7 @@ const opListObjects = "ListObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -3832,7 +3982,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { req, out := c.ListObjectsRequest(input) return out, req.Send() @@ -3929,7 +4079,7 @@ const opListObjectsV2 = "ListObjectsV2" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ Name: opListObjectsV2, @@ -3970,7 +4120,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { req, out := c.ListObjectsV2Request(input) return out, req.Send() @@ -4067,7 +4217,7 @@ const opListParts = "ListParts" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -4100,7 +4250,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListParts for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { req, out := c.ListPartsRequest(input) return out, req.Send() @@ -4197,7 +4347,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -4226,7 +4376,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage 
Service's // API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { req, out := c.PutBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -4273,7 +4423,7 @@ const opPutBucketAcl = "PutBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -4302,7 +4452,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAcl for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { req, out := c.PutBucketAclRequest(input) return out, req.Send() @@ -4349,7 +4499,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAnalyticsConfiguration, @@ -4379,7 +4529,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAnalyticsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { req, out := c.PutBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -4426,7 +4576,7 @@ const opPutBucketCors = "PutBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -4455,7 +4605,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { req, out := c.PutBucketCorsRequest(input) return out, req.Send() @@ -4477,6 +4627,83 @@ func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput return out, req.Send() } +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -4502,7 +4729,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opPutBucketInventoryConfiguration, @@ -4532,7 +4759,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketInventoryConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { req, out := c.PutBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -4579,7 +4806,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -4611,7 +4838,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycle for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) return out, req.Send() @@ -4658,7 +4885,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -4688,7 +4915,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycleConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -4735,7 +4962,7 @@ const opPutBucketLogging = "PutBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -4766,7 +4993,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLogging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) return out, req.Send() @@ -4813,7 +5040,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketMetricsConfiguration, @@ -4843,7 +5070,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketMetricsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { req, out := c.PutBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -4890,7 +5117,7 @@ const opPutBucketNotification = "PutBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -4922,7 +5149,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotification for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) return out, req.Send() @@ -4969,7 +5196,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -4998,7 +5225,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotificationConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { req, out := c.PutBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -5045,7 +5272,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -5075,7 +5302,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketPolicy for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { req, out := c.PutBucketPolicyRequest(input) return out, req.Send() @@ -5122,7 +5349,7 @@ const opPutBucketReplication = "PutBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -5152,7 +5379,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketReplication for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { req, out := c.PutBucketReplicationRequest(input) return out, req.Send() @@ -5199,7 +5426,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -5232,7 +5459,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketRequestPayment for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { req, out := c.PutBucketRequestPaymentRequest(input) return out, req.Send() @@ -5279,7 +5506,7 @@ const opPutBucketTagging = "PutBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -5308,7 +5535,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { req, out := c.PutBucketTaggingRequest(input) return out, req.Send() @@ -5355,7 +5582,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -5385,7 +5612,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketVersioning for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { req, out := c.PutBucketVersioningRequest(input) return out, req.Send() @@ -5432,7 +5659,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -5461,7 +5688,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketWebsite for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { req, out := c.PutBucketWebsiteRequest(input) return out, req.Send() @@ -5508,7 +5735,7 @@ const opPutObject = "PutObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -5535,7 +5762,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { req, out := c.PutObjectRequest(input) return out, req.Send() @@ -5582,7 +5809,7 @@ const opPutObjectAcl = "PutObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -5615,7 +5842,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { req, out := c.PutObjectAclRequest(input) return out, req.Send() @@ -5662,7 +5889,7 @@ const opPutObjectTagging = "PutObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ Name: opPutObjectTagging, @@ -5689,7 +5916,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObjectTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { req, out := c.PutObjectTaggingRequest(input) return out, req.Send() @@ -5736,7 +5963,7 @@ const opRestoreObject = "RestoreObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ Name: opRestoreObject, @@ -5768,7 +5995,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" // This operation is not allowed against this storage tier // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { req, out := c.RestoreObjectRequest(input) return out, req.Send() @@ -5815,7 +6042,7 @@ const opUploadPart = "UploadPart" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { op := &request.Operation{ Name: opUploadPart, @@ -5848,7 +6075,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPart for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { req, out := c.UploadPartRequest(input) return out, req.Send() @@ -5895,7 +6122,7 @@ const opUploadPartCopy = "UploadPartCopy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { op := &request.Operation{ Name: opUploadPartCopy, @@ -5922,7 +6149,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPartCopy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { req, out := c.UploadPartCopyRequest(input) return out, req.Send() @@ -5946,7 +6173,7 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp // Specifies the days since the initiation of an Incomplete Multipart Upload // that Lifecycle will wait before permanently removing all parts of the upload. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -5971,7 +6198,7 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -6054,7 +6281,7 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -6079,7 +6306,7 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration type AccelerateConfiguration struct { _ struct{} `type:"structure"` @@ -6103,7 +6330,7 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy type AccessControlPolicy struct { _ struct{} `type:"structure"` @@ -6155,7 +6382,47 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator +// Container for 
information regarding the access control for replicas. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // The override value for the owner of the replica object. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. 
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator type AnalyticsAndOperator struct { _ struct{} `type:"structure"` @@ -6208,7 +6475,7 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -6283,7 +6550,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -6327,7 +6594,7 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -6390,7 +6657,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -6470,7 +6737,7 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket type 
Bucket struct { _ struct{} `type:"structure"` @@ -6503,7 +6770,7 @@ func (s *Bucket) SetName(v string) *Bucket { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -6550,7 +6817,7 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus type BucketLoggingStatus struct { _ struct{} `type:"structure"` @@ -6588,7 +6855,7 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -6635,7 +6902,7 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule type CORSRule struct { _ struct{} `type:"structure"` @@ -6719,7 +6986,141 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +// Describes how a CSV-formatted input object is formatted. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput +type CSVInput struct { + _ struct{} `type:"structure"` + + // Single character used to indicate a row should be ignored when present at + // the start of a row. 
+ Comments *string `type:"string"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput +type CSVOutput struct { + _ struct{} `type:"structure"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. 
+func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -6777,7 +7178,7 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix type CommonPrefix struct { _ struct{} `type:"structure"` @@ -6800,7 +7201,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest type CompleteMultipartUploadInput struct { _ struct{} `type:"structure" payload:"MultipartUpload"` @@ -6891,7 +7292,7 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -6995,7 +7396,7 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload type CompletedMultipartUpload struct { _ struct{} `type:"structure"` @@ -7018,7 +7419,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart type CompletedPart struct { _ struct{} `type:"structure"` @@ -7052,7 +7453,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition type Condition struct { _ struct{} `type:"structure"` @@ -7095,7 +7496,7 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest type CopyObjectInput struct { _ struct{} `type:"structure"` @@ -7479,7 +7880,7 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -7580,7 +7981,7 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult type CopyObjectResult struct { _ struct{} `type:"structure"` @@ -7611,7 +8012,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult type CopyPartResult struct { _ struct{} `type:"structure"` @@ -7644,7 +8045,7 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration type CreateBucketConfiguration struct { _ struct{} `type:"structure"` @@ -7669,7 +8070,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest type CreateBucketInput struct { _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` @@ -7776,7 +8177,7 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -7799,7 +8200,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest type CreateMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -8071,7 +8472,7 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -8191,7 +8592,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete type Delete struct { _ struct{} `type:"structure"` @@ -8248,7 +8649,7 @@ func (s *Delete) SetQuiet(v bool) *Delete { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest type DeleteBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8308,7 +8709,7 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8323,7 +8724,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` @@ -8367,7 +8768,7 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -8382,7 +8783,69 @@ func (s DeleteBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionRequest +type DeleteBucketEncryptionInput struct { + _ struct{} 
`type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionOutput +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest type DeleteBucketInput struct { _ struct{} `type:"structure"` @@ -8426,7 +8889,7 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest +// See 
also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest type DeleteBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -8486,7 +8949,7 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8501,7 +8964,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -8545,7 +9008,7 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -8560,7 +9023,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest type DeleteBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8620,7 +9083,7 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8635,7 +9098,7 @@ func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput type DeleteBucketOutput struct { _ struct{} `type:"structure"` } @@ -8650,7 +9113,7 @@ func (s DeleteBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest type DeleteBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -8694,7 +9157,7 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -8709,7 +9172,7 @@ func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest type DeleteBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -8753,7 +9216,7 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput type DeleteBucketReplicationOutput struct { _ struct{} 
`type:"structure"` } @@ -8768,7 +9231,7 @@ func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest type DeleteBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -8812,7 +9275,7 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -8827,7 +9290,7 @@ func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest type DeleteBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -8871,7 +9334,7 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -8886,7 +9349,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -8946,7 +9409,7 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest type DeleteObjectInput struct { _ struct{} `type:"structure"` @@ -9036,7 +9499,7 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -9081,7 +9544,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -9149,7 +9612,7 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -9173,7 +9636,7 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest type DeleteObjectsInput struct { _ struct{} `type:"structure" payload:"Delete"` @@ -9256,7 +9719,7 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput type 
DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -9297,7 +9760,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject type DeletedObject struct { _ struct{} `type:"structure"` @@ -9344,16 +9807,27 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +// Container for replication destination information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination type Destination struct { _ struct{} `type:"structure"` + // Container for information regarding the access control for replicas. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Account ID of the destination bucket. Currently this is only being verified + // if Access Control Translation is enabled + Account *string `type:"string"` + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store // replicas of the object identified by the rule. // // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Container for information regarding encryption based configuration for replicas. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // The class of storage used to store the object. 
StorageClass *string `type:"string" enum:"StorageClass"` } @@ -9374,6 +9848,11 @@ func (s *Destination) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9381,6 +9860,18 @@ func (s *Destination) Validate() error { return nil } +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + // SetBucket sets the Bucket field's value. func (s *Destination) SetBucket(v string) *Destination { s.Bucket = &v @@ -9394,13 +9885,106 @@ func (s *Destination) getBucket() (v string) { return *s.Bucket } +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error +// Describes the server-side encryption that will be applied to the restore +// results. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). 
+ // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. + KMSKeyId *string `type:"string"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Container for information regarding encryption based configuration for replicas. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The id of the KMS key used to encrypt the replica object. 
+ ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. +func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error type Error struct { _ struct{} `type:"structure"` @@ -9447,7 +10031,7 @@ func (s *Error) SetVersionId(v string) *Error { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument type ErrorDocument struct { _ struct{} `type:"structure"` @@ -9490,7 +10074,7 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { } // Container for key value pair that defines the criteria for the filter rule. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule type FilterRule struct { _ struct{} `type:"structure"` @@ -9525,7 +10109,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest type GetBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure"` @@ -9571,7 +10155,7 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -9595,7 +10179,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -9639,7 +10223,7 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -9671,7 +10255,7 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest type GetBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -9731,7 +10315,7 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -9755,7 +10339,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest type GetBucketCorsInput struct { _ struct{} `type:"structure"` @@ -9799,7 +10383,7 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -9822,7 +10406,79 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionRequest +type GetBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionOutput +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. 
+func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -9882,7 +10538,7 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -9906,7 +10562,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` @@ -9950,7 +10606,7 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -9973,7 +10629,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest type 
GetBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -10017,7 +10673,7 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -10040,7 +10696,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest type GetBucketLocationInput struct { _ struct{} `type:"structure"` @@ -10084,7 +10740,7 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput type GetBucketLocationOutput struct { _ struct{} `type:"structure"` @@ -10107,7 +10763,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest type GetBucketLoggingInput struct { _ struct{} `type:"structure"` @@ -10151,7 +10807,7 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -10174,7 +10830,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket return 
s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest type GetBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -10234,7 +10890,7 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -10258,7 +10914,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` @@ -10304,7 +10960,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -10348,7 +11004,7 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -10372,7 +11028,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) 
*GetBucketPolicyOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest type GetBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -10416,7 +11072,7 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -10441,7 +11097,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest type GetBucketRequestPaymentInput struct { _ struct{} `type:"structure"` @@ -10485,7 +11141,7 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -10509,7 +11165,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest type GetBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -10553,7 +11209,7 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -10577,7 +11233,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest type GetBucketVersioningInput struct { _ struct{} `type:"structure"` @@ -10621,7 +11277,7 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -10656,7 +11312,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -10700,7 +11356,7 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -10747,7 +11403,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest type GetObjectAclInput struct { _ struct{} `type:"structure"` @@ -10827,7 +11483,7 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -10869,7 +11525,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest type GetObjectInput struct { _ struct{} `type:"structure"` @@ -11104,7 +11760,7 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -11388,7 +12044,7 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest type GetObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -11455,7 +12111,7 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -11487,7 +12143,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v 
string) *GetObjectTaggingOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest type GetObjectTorrentInput struct { _ struct{} `type:"structure"` @@ -11558,7 +12214,7 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -11591,7 +12247,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -11630,7 +12286,7 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant type Grant struct { _ struct{} `type:"structure"` @@ -11677,7 +12333,7 @@ func (s *Grant) SetPermission(v string) *Grant { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -11752,7 +12408,7 @@ func (s *Grantee) SetURI(v string) *Grantee { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest type 
HeadBucketInput struct { _ struct{} `type:"structure"` @@ -11796,7 +12452,7 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -11811,7 +12467,7 @@ func (s HeadBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest type HeadObjectInput struct { _ struct{} `type:"structure"` @@ -11993,7 +12649,7 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -12250,7 +12906,7 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument type IndexDocument struct { _ struct{} `type:"structure"` @@ -12292,7 +12948,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator type Initiator struct { _ struct{} `type:"structure"` @@ -12326,7 +12982,32 @@ func (s *Initiator) SetID(v string) *Initiator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +// Describes the serialization format of the object. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -12455,7 +13136,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination type InventoryDestination struct { _ struct{} `type:"structure"` @@ -12500,7 +13181,57 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +// Contains the type of server-side encryption used to encrypt the inventory +// results. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delievered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delievered Inventory reports. 
+ SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter type InventoryFilter struct { _ struct{} `type:"structure"` @@ -12539,7 +13270,7 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` @@ -12552,6 +13283,10 @@ type InventoryS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + // Specifies the output format of the inventory results. 
// // Format is a required field @@ -12580,6 +13315,11 @@ func (s *InventoryS3BucketDestination) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12606,6 +13346,12 @@ func (s *InventoryS3BucketDestination) getBucket() (v string) { return *s.Bucket } +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + // SetFormat sets the Format field's value. func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { s.Format = &v @@ -12618,7 +13364,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule type InventorySchedule struct { _ struct{} `type:"structure"` @@ -12658,7 +13404,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { } // Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter type KeyFilter struct { _ struct{} `type:"structure"` @@ -12684,7 +13430,7 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { } // Container for specifying the AWS Lambda notification configuration. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -12756,7 +13502,7 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -12803,7 +13549,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -12850,7 +13596,7 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule type LifecycleRule struct { _ struct{} `type:"structure"` @@ -12974,7 +13720,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { // This is used in a Lifecycle Rule Filter to apply a logical AND to two or // more predicates. The Lifecycle Rule will apply to any object matching all // of the predicates configured inside the And operator. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` @@ -13029,7 +13775,7 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { // The Filter is used to identify objects that a Lifecycle Rule applies to. // A Filter must have exactly one of Prefix, Tag, or And specified. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -13093,7 +13839,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest type ListBucketAnalyticsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -13149,7 +13895,7 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13204,7 +13950,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest type ListBucketInventoryConfigurationsInput struct { _ struct{} 
`type:"structure"` @@ -13262,7 +14008,7 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13317,7 +14063,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest type ListBucketMetricsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -13375,7 +14121,7 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13432,7 +14178,7 @@ func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v strin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput type ListBucketsInput struct { _ struct{} `type:"structure"` } @@ -13447,7 +14193,7 @@ func (s ListBucketsInput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput type ListBucketsOutput struct { _ struct{} `type:"structure"` @@ -13478,7 +14224,7 @@ func (s 
*ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` @@ -13587,7 +14333,7 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -13721,7 +14467,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest type ListObjectVersionsInput struct { _ struct{} `type:"structure"` @@ -13825,7 +14571,7 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -13953,7 +14699,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest type ListObjectsInput struct { _ struct{} `type:"structure"` @@ -14059,7 +14805,7 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -14164,7 +14910,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request type ListObjectsV2Input struct { _ struct{} `type:"structure"` @@ -14290,7 +15036,7 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output type ListObjectsV2Output struct { _ struct{} `type:"structure"` @@ -14424,7 +15170,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest type ListPartsInput struct { _ struct{} `type:"structure"` @@ -14528,7 +15274,7 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput type ListPartsOutput struct { _ struct{} `type:"structure"` @@ -14678,7 +15424,136 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +// Describes an S3 location that will receive the results of the restore request. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. 
+func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -14745,7 +15620,39 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator +// A metadata key-value pair to store with an object. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry +type MetadataEntry struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -14798,7 +15705,7 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -14853,7 +15760,7 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter type MetricsFilter struct { _ struct{} `type:"structure"` @@ -14917,7 +15824,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload type MultipartUpload struct { _ struct{} `type:"structure"` @@ -14990,7 +15897,7 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { // configuration action on a bucket that has versioning enabled (or suspended) // to request that Amazon S3 delete noncurrent object versions at a specific // period in the object's lifetime. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` @@ -15022,7 +15929,7 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers // versioning-enabled (or versioning is suspended), you can set this action // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA // or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` @@ -15060,7 +15967,7 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi // Container for specifying the notification configuration of the bucket. If // this element is empty, notifications are turned off on the bucket. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration type NotificationConfiguration struct { _ struct{} `type:"structure"` @@ -15139,7 +16046,7 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -15180,7 +16087,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf // Container for object key name filtering rules. 
For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -15204,7 +16111,7 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object type Object struct { _ struct{} `type:"structure"` @@ -15268,7 +16175,7 @@ func (s *Object) SetStorageClass(v string) *Object { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -15319,7 +16226,7 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion type ObjectVersion struct { _ struct{} `type:"structure"` @@ -15405,7 +16312,72 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +// Describes the location where the restore job's output is stored. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. 
+ S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. 
+func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner type Owner struct { _ struct{} `type:"structure"` @@ -15436,7 +16408,7 @@ func (s *Owner) SetID(v string) *Owner { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part type Part struct { _ struct{} `type:"structure"` @@ -15488,7 +16460,7 @@ func (s *Part) SetSize(v int64) *Part { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest type PutBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure" payload:"AccelerateConfiguration"` @@ -15548,7 +16520,7 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15563,7 +16535,7 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -15675,7 +16647,7 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -15690,7 +16662,7 @@ func (s PutBucketAclOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest type PutBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -15769,7 +16741,7 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15784,7 +16756,7 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest type PutBucketCorsInput struct { _ struct{} `type:"structure" payload:"CORSConfiguration"` @@ -15845,7 +16817,7 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -15860,7 +16832,89 @@ func (s PutBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionRequest +type PutBucketEncryptionInput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // The name of the bucket for which the server-side encryption configuration + // is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionOutput +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest type PutBucketInventoryConfigurationInput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -15939,7 +16993,7 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15954,7 +17008,7 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest type PutBucketLifecycleConfigurationInput struct { _ struct{} 
`type:"structure" payload:"LifecycleConfiguration"` @@ -16011,7 +17065,7 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16026,7 +17080,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest type PutBucketLifecycleInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` @@ -16083,7 +17137,7 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -16098,7 +17152,7 @@ func (s PutBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest type PutBucketLoggingInput struct { _ struct{} `type:"structure" payload:"BucketLoggingStatus"` @@ -16159,7 +17213,7 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput type PutBucketLoggingOutput struct { _ struct{} 
`type:"structure"` } @@ -16174,7 +17228,7 @@ func (s PutBucketLoggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest type PutBucketMetricsConfigurationInput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -16253,7 +17307,7 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16268,7 +17322,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest type PutBucketNotificationConfigurationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -16332,7 +17386,7 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16347,7 +17401,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest +// See also, 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest type PutBucketNotificationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -16403,7 +17457,7 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -16418,13 +17472,17 @@ func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest type PutBucketPolicyInput struct { _ struct{} `type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + // The bucket policy as a JSON document. // // Policy is a required field @@ -16470,13 +17528,19 @@ func (s *PutBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + // SetPolicy sets the Policy field's value. 
func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { s.Policy = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -16491,7 +17555,7 @@ func (s PutBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest type PutBucketReplicationInput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -16555,7 +17619,7 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -16570,7 +17634,7 @@ func (s PutBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest type PutBucketRequestPaymentInput struct { _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` @@ -16631,7 +17695,7 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -16646,7 +17710,7 @@ func (s 
PutBucketRequestPaymentOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest type PutBucketTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -16707,7 +17771,7 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -16722,7 +17786,7 @@ func (s PutBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest type PutBucketVersioningInput struct { _ struct{} `type:"structure" payload:"VersioningConfiguration"` @@ -16788,7 +17852,7 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -16803,7 +17867,7 @@ func (s PutBucketVersioningOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest type PutBucketWebsiteInput struct { _ struct{} `type:"structure" payload:"WebsiteConfiguration"` @@ -16864,7 +17928,7 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) return s } -// 
Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -16879,7 +17943,7 @@ func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -17027,7 +18091,7 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -17052,7 +18116,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest type PutObjectInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -17085,6 +18149,9 @@ type PutObjectInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -17238,6 +18305,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. 
+func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + // SetContentType sets the ContentType field's value. func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { s.ContentType = &v @@ -17347,7 +18420,7 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput type PutObjectOutput struct { _ struct{} `type:"structure"` @@ -17442,7 +18515,7 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest type PutObjectTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -17526,7 +18599,7 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -17551,7 +18624,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput // Container for specifying an configuration when you want Amazon S3 to publish // events to an Amazon Simple Queue Service (Amazon SQS) queue. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration type QueueConfiguration struct { _ struct{} `type:"structure"` @@ -17623,7 +18696,7 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -17673,7 +18746,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect type Redirect struct { _ struct{} `type:"structure"` @@ -17742,7 +18815,7 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` @@ -17793,7 +18866,7 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { // Container for replication rules. You can add as many as 1,000 rules. Total // replication configuration size can be up to 2 MB. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration type ReplicationConfiguration struct { _ struct{} `type:"structure"` @@ -17858,10 +18931,13 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule +// Container for information about a particular replication rule. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule type ReplicationRule struct { _ struct{} `type:"structure"` + // Container for replication destination information. + // // Destination is a required field Destination *Destination `type:"structure" required:"true"` @@ -17875,6 +18951,9 @@ type ReplicationRule struct { // Prefix is a required field Prefix *string `type:"string" required:"true"` + // Container for filters that define which source objects should be replicated. + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + // The rule is ignored if status is not Enabled. // // Status is a required field @@ -17908,6 +18987,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -17933,13 +19017,19 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { return s } +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + // SetStatus sets the Status field's value. 
func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -17978,7 +19068,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest type RestoreObjectInput struct { _ struct{} `type:"structure" payload:"RestoreRequest"` @@ -17994,6 +19084,7 @@ type RestoreObjectInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` @@ -18070,13 +19161,17 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput type RestoreObjectOutput struct { _ struct{} `type:"structure"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. 
+ RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } // String returns the string representation @@ -18095,17 +19190,39 @@ func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest type RestoreRequest struct { _ struct{} `type:"structure"` - // Lifetime of the active copy in days - // - // Days is a required field - Days *int64 `type:"integer" required:"true"` + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` - // Glacier related prameters pertaining to this job. + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` } // String returns the string representation @@ -18121,14 +19238,21 @@ func (s RestoreRequest) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *RestoreRequest) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) - } if s.GlacierJobParameters != nil { if err := s.GlacierJobParameters.Validate(); err != nil { invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) } } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -18142,13 +19266,43 @@ func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { return s } +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + // SetGlacierJobParameters sets the GlacierJobParameters field's value. func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { s.GlacierJobParameters = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule type RoutingRule struct { _ struct{} `type:"structure"` @@ -18201,7 +19355,7 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule type Rule struct { _ struct{} `type:"structure"` @@ -18316,7 +19470,374 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis +// Specifies the use of SSE-KMS to encrypt delievered Inventory reports. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delievered Inventory reports. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3 +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// Describes the parameters for Select job types. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // KMS master key ID to use for the default encryption. This parameter is allowed + // if SSEAlgorithm is aws:kms. + KMSMasterKeyID *string `type:"string"` + + // Server-side encryption algorithm to use for the default encryption. 
+ // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. 
+ // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Container for information about a particular server-side encryption configuration +// rule. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Describes the default server-side encryption to apply to new objects in the + // bucket. If Put Object request does not specify any server-side encryption, + // this default encryption will be applied. 
+ ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` +} + +// String returns the string representation +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// Container for filters that define which source objects should be replicated. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // Container for filter information of selection of KMS Encrypted S3 objects. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// Container for filter information of selection of KMS Encrypted S3 objects. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // The replication for KMS encrypted S3 objects is disabled if status is not + // Enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. 
+func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis type StorageClassAnalysis struct { _ struct{} `type:"structure"` @@ -18356,7 +19877,7 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -18414,7 +19935,7 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag type Tag struct { _ struct{} `type:"structure"` @@ -18470,7 +19991,7 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging type Tagging struct { _ struct{} `type:"structure"` @@ -18517,7 +20038,7 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant type TargetGrant struct { _ struct{} `type:"structure"` @@ -18566,7 +20087,7 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { // Container for specifying the configuration when you want Amazon S3 to publish // events to an Amazon Simple Notification Service (Amazon SNS) topic. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration type TopicConfiguration struct { _ struct{} `type:"structure"` @@ -18638,7 +20159,7 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -18690,7 +20211,7 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition type Transition struct { _ struct{} `type:"structure"` @@ -18734,7 +20255,7 @@ func (s *Transition) SetStorageClass(v string) *Transition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -18978,7 +20499,7 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -19063,7 +20584,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest type 
UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -19079,6 +20600,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -19178,6 +20702,12 @@ func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -19227,7 +20757,7 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -19303,7 +20833,7 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -19338,7 +20868,7 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration type WebsiteConfiguration struct { _ struct{} `type:"structure"` @@ -19550,6 +21080,22 @@ const ( ExpirationStatusDisabled 
= "Disabled" ) +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -19561,6 +21107,9 @@ const ( const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" ) const ( @@ -19597,6 +21146,9 @@ const ( // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" ) const ( @@ -19662,6 +21214,11 @@ const ( ObjectVersionStorageClassStandard = "STANDARD" ) +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -19695,6 +21252,14 @@ const ( ProtocolHttps = "https" ) +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" @@ -19733,6 +21298,11 @@ const ( RequestPayerRequester = "requester" ) +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -19741,6 +21311,14 @@ 
const ( ServerSideEncryptionAwsKms = "aws:kms" ) +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go index 30068d159..0def02255 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -10,7 +10,7 @@ // // Using the Client // -// To Amazon Simple Storage Service with the SDK use the New function to create +// To contact Amazon Simple Storage Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index b794a63ba..39b912c26 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -35,7 +35,7 @@ // // The s3manager package's Downloader provides concurrently downloading of Objects // from S3. The Downloader will write S3 Object content with an io.WriterAt. -// Once the Downloader instance is created you can call Upload concurrently from +// Once the Downloader instance is created you can call Download concurrently from // multiple goroutines safely. 
// // // The session the S3 Downloader will use @@ -56,7 +56,7 @@ // Key: aws.String(myString), // }) // if err != nil { -// return fmt.Errorf("failed to upload file, %v", err) +// return fmt.Errorf("failed to download file, %v", err) // } // fmt.Printf("file downloaded, %d bytes\n", n) // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index 34e71df0f..28c30d976 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -92,6 +92,10 @@ type S3API interface { DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error) DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) @@ -148,6 +152,10 @@ type S3API interface { GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + GetBucketEncryption(*s3.GetBucketEncryptionInput) 
(*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) @@ -295,6 +303,10 @@ type S3API interface { PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go index 33931c6a6..7f1674f4f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go @@ -60,7 +60,15 @@ func newError(err error, bucket, key *string) Error { } func (err *Error) Error() string { - return fmt.Sprintf("failed to upload %q to %q:\n%s", err.Key, err.Bucket, err.OrigErr.Error()) + origErr := "" + if err.OrigErr != nil { + origErr = ":\n" + err.OrigErr.Error() + } + return fmt.Sprintf("failed to upload %q to %q%s", + aws.StringValue(err.Key), + aws.StringValue(err.Bucket), + origErr, + ) } // NewBatchError will return a BatchError that satisfies the awserr.Error interface. @@ -206,7 +214,7 @@ type BatchDelete struct { // }, // } // -// if err := batcher.Delete(&s3manager.DeleteObjectsIterator{ +// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ // Objects: objects, // }); err != nil { // return err @@ -239,7 +247,7 @@ func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete // }, // } // -// if err := batcher.Delete(&s3manager.DeleteObjectsIterator{ +// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ // Objects: objects, // }); err != nil { // return err diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go index fc1f47205..a4079b83e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -117,6 +117,9 @@ type UploadInput struct { // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // A standard MIME type describing the format of the object data. 
ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -218,8 +221,11 @@ type Uploader struct { // if this value is set to zero, the DefaultUploadPartSize value will be used. PartSize int64 - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultUploadConcurrency value will be used. + // The number of goroutines to spin up in parallel per call to Upload when + // sending parts. If this is set to zero, the DefaultUploadConcurrency value + // will be used. + // + // The concurrency pool is not shared between calls to Upload. Concurrency int // Setting this value to true will cause the SDK to avoid calling diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 3b8be4378..23f0a06db 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -35,7 +35,7 @@ const opAssumeRole = "AssumeRole" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -168,7 +168,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { req, out := c.AssumeRoleRequest(input) return out, req.Send() @@ -215,7 +215,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { op := &request.Operation{ Name: opAssumeRoleWithSAML, @@ -341,7 +341,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) return out, req.Send() @@ -388,7 +388,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -543,7 +543,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) return out, req.Send() @@ -590,7 +590,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { op := &request.Operation{ Name: opDecodeAuthorizationMessage, @@ -655,7 +655,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // invalid. This can happen if the token contains invalid characters, such as // linebreaks. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) return out, req.Send() @@ -702,7 +702,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { op := &request.Operation{ Name: opGetCallerIdentity, @@ -730,7 +730,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // // See the AWS API reference guide for AWS Security Token Service's // API operation GetCallerIdentity for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) return out, req.Send() @@ -777,7 +777,7 @@ const opGetFederationToken = "GetFederationToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { op := &request.Operation{ Name: opGetFederationToken, @@ -899,7 +899,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) return out, req.Send() @@ -946,7 +946,7 @@ const opGetSessionToken = "GetSessionToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { op := &request.Operation{ Name: opGetSessionToken, @@ -1027,7 +1027,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. 
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) return out, req.Send() @@ -1049,7 +1049,7 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` @@ -1241,7 +1241,7 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1295,7 +1295,7 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` @@ -1436,7 +1436,7 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1548,7 +1548,7 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` @@ -1711,7 +1711,7 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1804,7 +1804,7 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` @@ -1847,7 +1847,7 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1906,7 +1906,7 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1951,7 +1951,7 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1976,7 +1976,7 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` @@ -2017,7 +2017,7 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2034,7 +2034,7 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -2080,7 +2080,7 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -2189,7 +2189,7 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -2242,7 +2242,7 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -2327,7 +2327,7 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index a43fa8055..ef681ab0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -56,7 +56,7 @@ // // Using the Client // -// To AWS Security Token Service with the SDK use the New function to create +// To contact AWS Security Token Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. 
// diff --git a/vendor/vendor.json b/vendor/vendor.json index 421c8710a..5c96bdf85 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -248,244 +248,244 @@ "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" }, { - "checksumSHA1": "7vSTKBRZq6azJnhm1k6XmNxu97M=", + "checksumSHA1": "tN8pbihy8mw+m0UVqNNFJmx7p+Y=", "path": "github.com/aws/aws-sdk-go", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "BRgDFkYJonSbN+/ugKMUuSXkS4c=", + "checksumSHA1": "NQu/L+9CIJLpgrZt3UMlbma9Pk0=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": 
"v1.12.71" }, { - "checksumSHA1": "n98FANpNeRT5kf6pizdpI7nm6Sw=", + "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": 
"dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "ZdtYh3ZHSgP/WEIaqwJHTEhpkbs=", + "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": 
"0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "W45tSKW4ZVgBk9H4lx5RbGi9OVg=", + "checksumSHA1": "bV8wC0xzF08ztv57EXbeLjNxmsI=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "Rdm9v0vpqsO1Ka9rwktfL19D8A8=", + "checksumSHA1": "JZ49s4cNe3nIttx3hWp04CQif4o=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "SK5Mn4Ga9+equOQTYA1DTSb3LWY=", + "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "1+ZxEwzc1Vz8X2l+kXkS2iATtas=", + "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": 
"2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", + "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=", + "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "comment": 
"v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=", + "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=", + "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": 
"dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", @@ -493,62 +493,62 @@ "path": "github.com/aws/aws-sdk-go/private/waiter", "revision": "1bd588c8b2dba4da57dd4664b2b2750d260a5915", "revisionTime": "2017-02-24T22:28:38Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", + "checksumSHA1": "4igS6faf4hrhDj6Jj9ErVcN1qKo=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "YNq7YhasHn9ceelWX2aG0Cg0Ga0=", + "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": 
"v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "SEKg+cGyOj6dKdK5ltUHsoL4R4Y=", + "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "RVrBBPDYg3ViwQDLBanFehkdqkM=", + "checksumSHA1": "ZP6QI0X9BNKk8o1p3AyLfjabS20=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/s3/s3iface", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "LlIpD3fzngPXshFh/5tuCWlrYzI=", + "checksumSHA1": "g6Eo2gEoj6YEZ+tLwydnfhwo7zg=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/s3/s3manager", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { - "checksumSHA1": "MerduaV3PxtZAWvOGpgoBIglo38=", + "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "comment": "v1.7.1", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "dd3acff9dc16f9a6fd87f6b4501590a532e7206a", - "revisionTime": "2017-08-10T20:40:06Z", - "version": "v1.10.23", - "versionExact": "v1.10.23" + "revision": "0cbaf57ea311890e62f0b01652529295079e9e95", + "revisionTime": "2018-02-05T21:18:42Z", + "version": "v1.12.71", + "versionExact": "v1.12.71" }, { 
"checksumSHA1": "7SbTaY0kaYxgQrG3/9cjrI+BcyU=", From b651571b30d14c08874bf991c156f8c8394744ac Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 5 Feb 2018 15:44:25 -0800 Subject: [PATCH 188/251] update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d73eb0ec..0e8424cb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,8 @@ ### BACKWARDS INCOMPATIBILITIES: * core: Affects Windows guests: User variables containing Powershell special characters no longer need to be escaped.[GH-5376] * provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] -* 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will break; the work to fix them is minimal and documented in GH-5810. [GH-5810] +* 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810] +* provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426] ### IMPROVEMENTS: From 21812fa17f50cad3dbc5b3a2fab18b054db0aa6f Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 2 Feb 2018 20:16:23 -0800 Subject: [PATCH 189/251] Add volume and run tags if in us-gov/china We can't tag on instance creation when we're in "restricted" regions, so let's add the tags after the resources have been created. Adds methods to AccessConfig to detect if we're in China or US Gov regions (i.e. "restricted"). 
Also turns tag:tag maps into a type, and moves methods around validating and converting them to ec2Tags to methods of the type. --- builder/amazon/chroot/device_test.go | 10 +++ builder/amazon/common/access_config.go | 16 ++++ builder/amazon/common/access_config_test.go | 20 +++++ builder/amazon/common/ami_config.go | 4 +- builder/amazon/common/step_create_tags.go | 47 ++--------- .../amazon/common/step_run_source_instance.go | 84 +++++++++++++++++-- .../amazon/common/step_run_spot_instance.go | 18 ++-- builder/amazon/common/tags.go | 47 +++++++++++ builder/amazon/ebs/builder.go | 3 +- builder/amazon/ebssurrogate/builder.go | 5 +- builder/amazon/ebsvolume/block_device.go | 2 +- builder/amazon/ebsvolume/builder.go | 1 + .../amazon/ebsvolume/step_tag_ebs_volumes.go | 5 +- builder/amazon/instance/builder.go | 1 + 14 files changed, 198 insertions(+), 65 deletions(-) create mode 100644 builder/amazon/chroot/device_test.go create mode 100644 builder/amazon/common/tags.go diff --git a/builder/amazon/chroot/device_test.go b/builder/amazon/chroot/device_test.go new file mode 100644 index 000000000..a47fc7dff --- /dev/null +++ b/builder/amazon/chroot/device_test.go @@ -0,0 +1,10 @@ +package chroot + +import "testing" + +func TestDevicePrefixMatch(t *testing.T) { + /* + if devicePrefixMatch("nvme0n1") != "" { + } + */ +} diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index b569d0314..c24ab25be 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -80,6 +81,21 @@ func (c *AccessConfig) Session() (*session.Session, error) { return c.session, nil } +func (c *AccessConfig) SessionRegion() string { + if c.session == nil { + panic("access config session should be set.") + } + return aws.StringValue(c.session.Config.Region) +} + +func (c *AccessConfig) IsGovCloud() bool { + return 
strings.HasPrefix(c.SessionRegion(), "us-gov-") +} + +func (c *AccessConfig) IsChinaCloud() bool { + return strings.HasPrefix(c.SessionRegion(), "cn-") +} + // metadataRegion returns the region from the metadata service func (c *AccessConfig) metadataRegion() string { diff --git a/builder/amazon/common/access_config_test.go b/builder/amazon/common/access_config_test.go index 20d4851ba..e1c44cb89 100644 --- a/builder/amazon/common/access_config_test.go +++ b/builder/amazon/common/access_config_test.go @@ -2,6 +2,9 @@ package common import ( "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" ) func testAccessConfig() *AccessConfig { @@ -38,3 +41,20 @@ func TestAccessConfigPrepare_Region(t *testing.T) { c.SkipValidation = false } + +func TestAccessConfigPrepare_RegionRestrictd(t *testing.T) { + c := testAccessConfig() + + // Create a Session with a custom region + c.session = session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-gov-west-1"), + })) + + if err := c.Prepare(nil); err != nil { + t.Fatalf("shouldn't have err: %s", err) + } + + if !c.IsGovCloud() { + t.Fatal("We should be in gov region.") + } +} diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go index 9af2244ee..a3b2d2b4e 100644 --- a/builder/amazon/common/ami_config.go +++ b/builder/amazon/common/ami_config.go @@ -17,7 +17,7 @@ type AMIConfig struct { AMIProductCodes []string `mapstructure:"ami_product_codes"` AMIRegions []string `mapstructure:"ami_regions"` AMISkipRegionValidation bool `mapstructure:"skip_region_validation"` - AMITags map[string]string `mapstructure:"tags"` + AMITags TagMap `mapstructure:"tags"` AMIENASupport bool `mapstructure:"ena_support"` AMISriovNetSupport bool `mapstructure:"sriov_support"` AMIForceDeregister bool `mapstructure:"force_deregister"` @@ -25,7 +25,7 @@ type AMIConfig struct { AMIEncryptBootVolume bool `mapstructure:"encrypt_boot"` AMIKmsKeyId string `mapstructure:"kms_key_id"` 
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids"` - SnapshotTags map[string]string `mapstructure:"snapshot_tags"` + SnapshotTags TagMap `mapstructure:"snapshot_tags"` SnapshotUsers []string `mapstructure:"snapshot_users"` SnapshotGroups []string `mapstructure:"snapshot_groups"` } diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 47078b6ac..96177c8a8 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -15,8 +15,8 @@ import ( ) type StepCreateTags struct { - Tags map[string]string - SnapshotTags map[string]string + Tags TagMap + SnapshotTags TagMap Ctx interpolate.Context } @@ -33,7 +33,7 @@ func (s *StepCreateTags) Run(_ context.Context, state multistep.StateBag) multis sourceAMI = "" } - if len(s.Tags) == 0 && len(s.SnapshotTags) == 0 { + if !s.Tags.IsSet() && !s.SnapshotTags.IsSet() { return multistep.ActionContinue } @@ -79,22 +79,22 @@ func (s *StepCreateTags) Run(_ context.Context, state multistep.StateBag) multis // Convert tags to ec2.Tag format ui.Say("Creating AMI tags") - amiTags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, sourceAMI, s.Ctx) + amiTags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, sourceAMI) if err != nil { state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - ReportTags(ui, amiTags) + amiTags.Report(ui) ui.Say("Creating snapshot tags") - snapshotTags, err := ConvertToEC2Tags(s.SnapshotTags, *ec2conn.Config.Region, sourceAMI, s.Ctx) + snapshotTags, err := s.SnapshotTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, sourceAMI) if err != nil { state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - ReportTags(ui, snapshotTags) + snapshotTags.Report(ui) // Retry creating tags for about 2.5 minutes err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) { @@ -142,36 +142,3 @@ func (s *StepCreateTags) Run(_ context.Context, state multistep.StateBag) multis 
func (s *StepCreateTags) Cleanup(state multistep.StateBag) { // No cleanup... } - -func ReportTags(ui packer.Ui, tags []*ec2.Tag) { - for _, tag := range tags { - ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", - aws.StringValue(tag.Key), aws.StringValue(tag.Value))) - } -} - -func ConvertToEC2Tags(tags map[string]string, region, sourceAmiId string, ctx interpolate.Context) ([]*ec2.Tag, error) { - var ec2Tags []*ec2.Tag - for key, value := range tags { - - ctx.Data = &BuildInfoTemplate{ - SourceAMI: sourceAmiId, - BuildRegion: region, - } - interpolatedKey, err := interpolate.Render(key, &ctx) - if err != nil { - return ec2Tags, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err) - } - interpolatedValue, err := interpolate.Render(value, &ctx) - if err != nil { - return ec2Tags, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err) - } - - ec2Tags = append(ec2Tags, &ec2.Tag{ - Key: aws.String(interpolatedKey), - Value: aws.String(interpolatedValue), - }) - } - - return ec2Tags, nil -} diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index cc343961d..761e6f09c 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -8,8 +8,10 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" + retry "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" @@ -19,19 +21,20 @@ type StepRunSourceInstance struct { AssociatePublicIpAddress bool AvailabilityZone string BlockDevices BlockDevices + Ctx interpolate.Context Debug bool EbsOptimized bool ExpectedRootDevice string IamInstanceProfile string InstanceInitiatedShutdownBehavior string InstanceType string + IsRestricted bool SourceAMI string SubnetId string - Tags map[string]string - 
VolumeTags map[string]string + Tags TagMap UserData string UserDataFile string - Ctx interpolate.Context + VolumeTags TagMap instanceId string } @@ -85,16 +88,15 @@ func (s *StepRunSourceInstance) Run(_ context.Context, state multistep.StateBag) s.Tags["Name"] = "Packer Builder" } - ec2Tags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx) + ec2Tags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI) if err != nil { err := fmt.Errorf("Error tagging source instance: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - ReportTags(ui, ec2Tags) - volTags, err := ConvertToEC2Tags(s.VolumeTags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx) + volTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI) if err != nil { err := fmt.Errorf("Error tagging volumes: %s", err) state.Put("error", err) @@ -114,6 +116,7 @@ func (s *StepRunSourceInstance) Run(_ context.Context, state multistep.StateBag) EbsOptimized: &s.EbsOptimized, } + // Collect tags for tagging on resource creation var tagSpecs []*ec2.TagSpecification if len(ec2Tags) > 0 { @@ -134,8 +137,11 @@ func (s *StepRunSourceInstance) Run(_ context.Context, state multistep.StateBag) tagSpecs = append(tagSpecs, runVolTags) } - if len(tagSpecs) > 0 { + // If our region supports it, set tag specifications + if len(tagSpecs) > 0 && !s.IsRestricted { runOpts.SetTagSpecifications(tagSpecs) + ec2Tags.Report(ui) + volTags.Report(ui) } if keyName != "" { @@ -212,6 +218,70 @@ func (s *StepRunSourceInstance) Run(_ context.Context, state multistep.StateBag) state.Put("instance", instance) + // If we're in a region that doesn't support tagging on instance creation, + // do that now. 
+ + if s.IsRestricted { + ec2Tags.Report(ui) + // Retry creating tags for about 2.5 minutes + err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) { + _, err := ec2conn.CreateTags(&ec2.CreateTagsInput{ + Tags: ec2Tags, + Resources: []*string{instance.InstanceId}, + }) + if err == nil { + return true, nil + } + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidInstanceID.NotFound" { + return false, nil + } + } + return true, err + }) + + if err != nil { + err := fmt.Errorf("Error tagging source instance: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Now tag volumes + + volumeIds := make([]*string, 0) + for _, v := range instance.BlockDeviceMappings { + if ebs := v.Ebs; ebs != nil { + volumeIds = append(volumeIds, ebs.VolumeId) + } + } + + if len(volumeIds) > 0 && s.VolumeTags.IsSet() { + ui.Say("Adding tags to source EBS Volumes") + + volumeTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI) + if err != nil { + err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + volumeTags.Report(ui) + + _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ + Resources: volumeIds, + Tags: volumeTags, + }) + + if err != nil { + err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + } + return multistep.ActionContinue } diff --git a/builder/amazon/common/step_run_spot_instance.go b/builder/amazon/common/step_run_spot_instance.go index c5e3382ec..0f08ea90b 100644 --- a/builder/amazon/common/step_run_spot_instance.go +++ b/builder/amazon/common/step_run_spot_instance.go @@ -33,8 +33,8 @@ type StepRunSpotInstance struct { SpotPrice string SpotPriceProduct string SubnetId string - Tags map[string]string - VolumeTags map[string]string + Tags 
TagMap + VolumeTags TagMap UserData string UserDataFile string Ctx interpolate.Context @@ -143,14 +143,14 @@ func (s *StepRunSpotInstance) Run(_ context.Context, state multistep.StateBag) m s.Tags["Name"] = "Packer Builder" } - ec2Tags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx) + ec2Tags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI) if err != nil { err := fmt.Errorf("Error tagging source instance: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - ReportTags(ui, ec2Tags) + ec2Tags.Report(ui) ui.Message(fmt.Sprintf( "Requesting spot instance '%s' for: %s", @@ -284,21 +284,21 @@ func (s *StepRunSpotInstance) Run(_ context.Context, state multistep.StateBag) m } } - if len(volumeIds) > 0 && len(s.VolumeTags) > 0 { + if len(volumeIds) > 0 && s.VolumeTags.IsSet() { ui.Say("Adding tags to source EBS Volumes") - tags, err := ConvertToEC2Tags(s.VolumeTags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx) + + volumeTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI) if err != nil { err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - - ReportTags(ui, tags) + volumeTags.Report(ui) _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ Resources: volumeIds, - Tags: tags, + Tags: volumeTags, }) if err != nil { diff --git a/builder/amazon/common/tags.go b/builder/amazon/common/tags.go new file mode 100644 index 000000000..3c257e10f --- /dev/null +++ b/builder/amazon/common/tags.go @@ -0,0 +1,47 @@ +package common + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" +) + +type TagMap map[string]string +type EC2Tags []*ec2.Tag + +func (t EC2Tags) Report(ui packer.Ui) { + for _, tag := range t { + ui.Message(fmt.Sprintf("Adding 
tag: \"%s\": \"%s\"", + aws.StringValue(tag.Key), aws.StringValue(tag.Value))) + } +} + +func (t TagMap) IsSet() bool { + return len(t) > 0 +} + +func (t TagMap) EC2Tags(ctx interpolate.Context, region, sourceAMIID string) (EC2Tags, error) { + var ec2Tags []*ec2.Tag + ctx.Data = &BuildInfoTemplate{ + SourceAMI: sourceAMIID, + BuildRegion: region, + } + for key, value := range t { + interpolatedKey, err := interpolate.Render(key, &ctx) + if err != nil { + return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err) + } + interpolatedValue, err := interpolate.Render(value, &ctx) + if err != nil { + return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err) + } + ec2Tags = append(ec2Tags, &ec2.Tag{ + Key: aws.String(interpolatedKey), + Value: aws.String(interpolatedValue), + }) + } + return ec2Tags, nil +} diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 44e14b695..87354223c 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -28,7 +28,7 @@ type Config struct { awscommon.AMIConfig `mapstructure:",squash"` awscommon.BlockDevices `mapstructure:",squash"` awscommon.RunConfig `mapstructure:",squash"` - VolumeRunTags map[string]string `mapstructure:"run_volume_tags"` + VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"` ctx interpolate.Context } @@ -152,6 +152,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe IamInstanceProfile: b.config.IamInstanceProfile, InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior, InstanceType: b.config.InstanceType, + IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(), SourceAMI: b.config.SourceAmi, SubnetId: b.config.SubnetId, Tags: b.config.RunTags, diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go index 6dba669d8..b990c5758 100644 --- a/builder/amazon/ebssurrogate/builder.go +++ b/builder/amazon/ebssurrogate/builder.go @@ 
-26,8 +26,8 @@ type Config struct { awscommon.BlockDevices `mapstructure:",squash"` awscommon.AMIConfig `mapstructure:",squash"` - RootDevice RootBlockDevice `mapstructure:"ami_root_device"` - VolumeRunTags map[string]string `mapstructure:"run_volume_tags"` + RootDevice RootBlockDevice `mapstructure:"ami_root_device"` + VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"` ctx interpolate.Context } @@ -166,6 +166,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe IamInstanceProfile: b.config.IamInstanceProfile, InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior, InstanceType: b.config.InstanceType, + IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(), SourceAMI: b.config.SourceAmi, SubnetId: b.config.SubnetId, Tags: b.config.RunTags, diff --git a/builder/amazon/ebsvolume/block_device.go b/builder/amazon/ebsvolume/block_device.go index 5a23821b3..0d2b04613 100644 --- a/builder/amazon/ebsvolume/block_device.go +++ b/builder/amazon/ebsvolume/block_device.go @@ -7,7 +7,7 @@ import ( type BlockDevice struct { awscommon.BlockDevice `mapstructure:"-,squash"` - Tags map[string]string `mapstructure:"tags"` + Tags awscommon.TagMap `mapstructure:"tags"` } func commonBlockDevices(mappings []BlockDevice, ctx *interpolate.Context) (awscommon.BlockDevices, error) { diff --git a/builder/amazon/ebsvolume/builder.go b/builder/amazon/ebsvolume/builder.go index 83fc271fb..cdfda8e14 100644 --- a/builder/amazon/ebsvolume/builder.go +++ b/builder/amazon/ebsvolume/builder.go @@ -149,6 +149,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe IamInstanceProfile: b.config.IamInstanceProfile, InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior, InstanceType: b.config.InstanceType, + IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(), SourceAMI: b.config.SourceAmi, SubnetId: b.config.SubnetId, Tags: b.config.RunTags, diff --git 
a/builder/amazon/ebsvolume/step_tag_ebs_volumes.go b/builder/amazon/ebsvolume/step_tag_ebs_volumes.go index a638ec439..31e6799b1 100644 --- a/builder/amazon/ebsvolume/step_tag_ebs_volumes.go +++ b/builder/amazon/ebsvolume/step_tag_ebs_volumes.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/aws/aws-sdk-go/service/ec2" - awscommon "github.com/hashicorp/packer/builder/amazon/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" @@ -44,14 +43,14 @@ func (s *stepTagEBSVolumes) Run(_ context.Context, state multistep.StateBag) mul continue } - tags, err := awscommon.ConvertToEC2Tags(mapping.Tags, *ec2conn.Config.Region, *sourceAMI.ImageId, s.Ctx) + tags, err := mapping.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, *sourceAMI.ImageId) if err != nil { err := fmt.Errorf("Error tagging device %s with %s", mapping.DeviceName, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - awscommon.ReportTags(ui, tags) + tags.Report(ui) for _, v := range instance.BlockDeviceMappings { if *v.DeviceName == mapping.DeviceName { diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 828733a9f..98d9dc24d 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -232,6 +232,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EbsOptimized: b.config.EbsOptimized, IamInstanceProfile: b.config.IamInstanceProfile, InstanceType: b.config.InstanceType, + IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(), SourceAMI: b.config.SourceAmi, SubnetId: b.config.SubnetId, Tags: b.config.RunTags, From c29e5de38122872cab55cf049580ebd5e8517f6d Mon Sep 17 00:00:00 2001 From: Edward Date: Fri, 17 Nov 2017 10:28:15 +0800 Subject: [PATCH 190/251] Remove the deprecated extensions The Nova extension API was deprecated from OpenStack N release. 
this parts of code cannot work well with the newest OpenStack version. This patch is to remove the relative parts: 1. Remove the step_load_extensions.go 2. Remove the step of extension from builder.go 3. Remove the parameter parsing from step_stop_server.go Resolves: #5581 --- builder/openstack/builder.go | 1 - builder/openstack/step_load_extensions.go | 59 ----------------------- builder/openstack/step_stop_server.go | 7 --- 3 files changed, 67 deletions(-) mode change 100644 => 100755 builder/openstack/builder.go delete mode 100644 builder/openstack/step_load_extensions.go mode change 100644 => 100755 builder/openstack/step_stop_server.go diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go old mode 100644 new mode 100755 index f3c6fd13b..42770c625 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -70,7 +70,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ - &StepLoadExtensions{}, &StepLoadFlavor{ Flavor: b.config.Flavor, }, diff --git a/builder/openstack/step_load_extensions.go b/builder/openstack/step_load_extensions.go deleted file mode 100644 index b11cf5527..000000000 --- a/builder/openstack/step_load_extensions.go +++ /dev/null @@ -1,59 +0,0 @@ -package openstack - -import ( - "context" - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions" - "github.com/gophercloud/gophercloud/pagination" - "github.com/hashicorp/packer/helper/multistep" - "github.com/hashicorp/packer/packer" -) - -// StepLoadExtensions gets the FlavorRef from a Flavor. It first assumes -// that the Flavor is a ref and verifies it. Otherwise, it tries to find -// the flavor by name. 
-type StepLoadExtensions struct{} - -func (s *StepLoadExtensions) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(Config) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - client, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - ui.Say("Discovering enabled extensions...") - result := make(map[string]struct{}, 15) - pager := extensions.List(client) - err = pager.EachPage(func(p pagination.Page) (bool, error) { - // Extract the extensions from this page - exts, err := extensions.ExtractExtensions(p) - if err != nil { - return false, err - } - - for _, ext := range exts { - log.Printf("[DEBUG] Discovered extension: %s", ext.Alias) - result[ext.Alias] = struct{}{} - } - - return true, nil - }) - if err != nil { - err = fmt.Errorf("Error loading extensions: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - state.Put("extensions", result) - return multistep.ActionContinue -} - -func (s *StepLoadExtensions) Cleanup(state multistep.StateBag) { -} diff --git a/builder/openstack/step_stop_server.go b/builder/openstack/step_stop_server.go old mode 100644 new mode 100755 index 1c864eef0..4a0f867b0 --- a/builder/openstack/step_stop_server.go +++ b/builder/openstack/step_stop_server.go @@ -15,15 +15,8 @@ type StepStopServer struct{} func (s *StepStopServer) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) config := state.Get("config").(Config) - extensions := state.Get("extensions").(map[string]struct{}) server := state.Get("server").(*servers.Server) - // Verify we have the extension - if _, ok := extensions["os-server-start-stop"]; !ok { - ui.Say("OpenStack cluster doesn't support stop, skipping...") - return multistep.ActionContinue - } - // We need the v2 compute client client, err := 
config.computeV2Client() if err != nil { From c918e4113ffe1d2f2db5b4c41ad289e34df29987 Mon Sep 17 00:00:00 2001 From: Edward Date: Wed, 29 Nov 2017 10:36:05 +0800 Subject: [PATCH 191/251] Add the version note in OpenStack builder section --- website/source/docs/builders/openstack.html.md | 4 ++++ 1 file changed, 4 insertions(+) mode change 100644 => 100755 website/source/docs/builders/openstack.html.md diff --git a/website/source/docs/builders/openstack.html.md b/website/source/docs/builders/openstack.html.md old mode 100644 new mode 100755 index 5406f90ed..709d39246 --- a/website/source/docs/builders/openstack.html.md +++ b/website/source/docs/builders/openstack.html.md @@ -25,6 +25,9 @@ created. This simplifies configuration quite a bit. The builder does *not* manage images. Once it creates an image, it is up to you to use it or delete it. +~> **Note:** To use OpenStack builder with the OpenStack Newton (Oct 2016) +or earlier, we recommend you use Packer v1.1.2 or earlier version. + ~> **OpenStack Liberty or later requires OpenSSL!** To use the OpenStack builder with OpenStack Liberty (Oct 2015) or later you need to have OpenSSL installed *if you are using temporary key pairs*, i.e. don't use @@ -32,6 +35,7 @@ installed *if you are using temporary key pairs*, i.e. don't use [`ssh_password`](/docs/templates/communicator.html#ssh_password). All major OS'es have OpenSSL installed by default except Windows. + ## Configuration Reference There are many configuration options available for the builder. 
They are From 578de6ea890e6458e24b6887db7a10445f3ecbb4 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 6 Feb 2018 17:39:28 -0800 Subject: [PATCH 192/251] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e8424cb8..a8c3105e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ * provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] * 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810] * provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426] +* builder/openstack: Extension support has been removed. To use OpenStack builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you use Packer v1.1.2 or earlier version. 
### IMPROVEMENTS: From 2ae73162e506decfe4abca12bd3028183fef1344 Mon Sep 17 00:00:00 2001 From: James Cunningham Date: Tue, 6 Feb 2018 19:58:54 -0700 Subject: [PATCH 193/251] fix docs mountpoint typo --- website/source/docs/builders/amazon-chroot.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/amazon-chroot.html.md b/website/source/docs/builders/amazon-chroot.html.md index 1ffe84f38..3f7839fdc 100644 --- a/website/source/docs/builders/amazon-chroot.html.md +++ b/website/source/docs/builders/amazon-chroot.html.md @@ -335,7 +335,7 @@ chroot by Packer: These default mounts are usually good enough for anyone and are sane defaults. However, if you want to change or add the mount points, you may using the `chroot_mounts` configuration. Here is an example configuration which only -mounts `/prod` and `/dev`: +mounts `/proc` and `/dev`: ``` json { From f8f256354f953d6a3cb01422a3babc4e3455e31f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 6 Feb 2018 16:53:26 -0800 Subject: [PATCH 194/251] use winrmcp logic when creating new winrm client for uploads and downloads --- communicator/winrm/communicator.go | 52 ++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 3970d1af8..6cf2bce37 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -128,12 +128,10 @@ func (c *Communicator) Upload(path string, input io.Reader, fi *os.FileInfo) err return err } - // Get information about destination path - endpoint := winrm.NewEndpoint(c.endpoint.Host, c.endpoint.Port, c.config.Https, c.config.Insecure, nil, nil, nil, c.config.Timeout) - client, err := winrm.NewClient(endpoint, c.config.Username, c.config.Password) if err != nil { return fmt.Errorf("Was unable to create winrm client: %s", err) } + client, err := c.newWinRMClient() stdout, _, _, err := 
client.RunWithString(fmt.Sprintf("powershell -Command \"(Get-Item %s) -is [System.IO.DirectoryInfo]\"", path), "") if err != nil { return fmt.Errorf("Couldn't determine whether destination was a folder or file: %s", err) @@ -162,8 +160,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error } func (c *Communicator) Download(src string, dst io.Writer) error { - endpoint := winrm.NewEndpoint(c.endpoint.Host, c.endpoint.Port, c.config.Https, c.config.Insecure, nil, nil, nil, c.config.Timeout) - client, err := winrm.NewClient(endpoint, c.config.Username, c.config.Password) + client, err := c.newWinRMClient() if err != nil { return err } @@ -182,9 +179,8 @@ func (c *Communicator) DownloadDir(src string, dst string, exclude []string) err return fmt.Errorf("WinRM doesn't support download dir.") } -func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { - addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) - return winrmcp.New(addr, &winrmcp.Config{ +func (c *Communicator) getClientConfig() *winrmcp.Config { + return &winrmcp.Config{ Auth: winrmcp.Auth{ User: c.config.Username, Password: c.config.Password, @@ -194,7 +190,45 @@ func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { OperationTimeout: c.config.Timeout, MaxOperationsPerShell: 15, // lowest common denominator TransportDecorator: c.config.TransportDecorator, - }) + } +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + clientConfig := c.getClientConfig() + return winrmcp.New(addr, clientConfig) +} + +func (c *Communicator) newWinRMClient() (*winrm.Client, error) { + conf := c.getClientConfig() + + // Shamelessly borrowed from the winrmcp client to ensure + // that the client is configured using the same defaulting behaviors that + // winrmcp uses even when we aren't using winrmcp. This ensures similar + // behavior between upload, download, and copy functions.
We can't use the + // one generated by winrmcp because it isn't exported. + var endpoint *winrm.Endpoint + endpoint = &winrm.Endpoint{ + Host: c.endpoint.Host, + Port: c.endpoint.Port, + HTTPS: conf.Https, + Insecure: conf.Insecure, + TLSServerName: conf.TLSServerName, + CACert: conf.CACertBytes, + Timeout: conf.ConnectTimeout, + } + params := winrm.NewParameters( + winrm.DefaultParameters.Timeout, + winrm.DefaultParameters.Locale, + winrm.DefaultParameters.EnvelopeSize, + ) + + params.TransportDecorator = conf.TransportDecorator + params.Timeout = "PT3M" + + client, err := winrm.NewClientWithParameters( + endpoint, conf.Auth.User, conf.Auth.Password, params) + return client, err } type Base64Pipe struct { From 974d9974fe1d6aaa0dc99f3abaec2159b2864f45 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 7 Feb 2018 11:34:18 -0800 Subject: [PATCH 195/251] add workaround for azure bug. --- provisioner/windows-restart/provisioner.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 0568516aa..ecf15e6a1 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -114,6 +114,12 @@ var waitForRestart = func(p *Provisioner, comm packer.Communicator) error { var cmd *packer.RemoteCmd trycommand := TryCheckReboot abortcommand := AbortReboot + + // This sleep works around an azure/winrm bug. For more info see + // https://github.com/hashicorp/packer/issues/5257; we can remove the + // sleep when the underlying bug has been resolved. + time.Sleep(1 * time.Second) + // Stolen from Vagrant reboot checker for { log.Printf("Check if machine is rebooting...") From c03ce222b20404d0cd97f906f3dca0a5f042f985 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 7 Feb 2018 15:45:00 -0800 Subject: [PATCH 196/251] add atlas deprecation warnings. 
--- command/push.go | 24 +++++++++++++++++------- post-processor/atlas/post-processor.go | 11 +++++++++++ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/command/push.go b/command/push.go index 723e9d92c..f063ae0d3 100644 --- a/command/push.go +++ b/command/push.go @@ -183,15 +183,15 @@ func (c *PushCommand) Run(args []string) int { info := &uploadBuildInfo{Type: b.Type} // todo: remove post-migration if b.Type == "vagrant" { - c.Ui.Message("\n-----------------------------------------------------------------------------------\n" + - "Warning: Vagrant-related functionality will be moved from Terraform Enterprise into \n" + - "its own product, Vagrant Cloud. This migration is currently planned for June 27th, \n" + - "2017 at 6PM EDT/3PM PDT/10PM UTC. For more information see \n" + + c.Ui.Error("\n-----------------------------------------------------------------------------------\n" + + "Vagrant-related functionality has been moved from Terraform Enterprise into \n" + + "its own product, Vagrant Cloud. For more information see " + "https://www.vagrantup.com/docs/vagrant-cloud/vagrant-cloud-migration.html\n" + - "In the meantime, you should activate your Vagrant Cloud account and replace your \n" + - "Atlas post-processor with the Vagrant Cloud post-processor. See\n" + - "https://www.packer.io/docs/post-processors/vagrant-cloud.html for more details." 
+ + "Please replace the Atlas post-processor with the Vagrant Cloud post-processor,\n" + + "and see https://www.packer.io/docs/post-processors/vagrant-cloud.html for\n" + + "more detail.\n" + "-----------------------------------------------------------------------------------\n") + return 1 } // Determine if we're artifacting this build @@ -251,6 +251,16 @@ func (c *PushCommand) Run(args []string) int { "Builds: %s\n\n", strings.Join(badBuilds, ", "))) } + c.Ui.Message("\n-----------------------------------------------------------------------\n" + + "Deprecation warning: The Packer and Artifact Registry features of Atlas\n" + + "will no longer be actively developed or maintained and will be fully\n" + + "decommissioned on Friday, March 30, 2018. Please see our guide on\n" + + "building immutable infrastructure with Packer on CI/CD for ideas on\n" + + "implementing these features yourself:\n" + + "https://www.packer.io/guides/packer-on-cicd/\n" + + "-----------------------------------------------------------------------\n", + ) + // Start the archiving process r, err := archive.CreateArchive(path, &opts) if err != nil { diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index 9b06e61ec..a0df0db06 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -151,6 +151,17 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac "and see https://www.packer.io/docs/post-processors/vagrant-cloud.html for\n" + "more detail.\n") } + + ui.Message("\n-----------------------------------------------------------------------\n" + + "Deprecation warning: The Packer and Artifact Registry features of Atlas\n" + + "will no longer be actively developed or maintained and will be fully\n" + + "decommissioned on Friday, March 30, 2018. 
Please see our guide on\n" + + "building immutable infrastructure with Packer on CI/CD for ideas on\n" + + "implementing these features yourself:\n" + + "https://www.packer.io/guides/packer-on-cicd/\n" + + "-----------------------------------------------------------------------\n", + ) + if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil { if err != atlas.ErrNotFound { return nil, false, fmt.Errorf( From 5d67f77f4325b7c9922e72170f52030fa9dfade0 Mon Sep 17 00:00:00 2001 From: Marc Mercer Date: Wed, 7 Feb 2018 19:41:01 -0800 Subject: [PATCH 197/251] Fixing 5852 --- provisioner/ansible/provisioner.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/provisioner/ansible/provisioner.go b/provisioner/ansible/provisioner.go index 18965d220..4992b532c 100644 --- a/provisioner/ansible/provisioner.go +++ b/provisioner/ansible/provisioner.go @@ -327,7 +327,11 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator, pri p.config.PackerBuildName, p.config.PackerBuilderType), "-i", inventory, playbook} if len(privKeyFile) > 0 { - args = append(args, "--private-key", privKeyFile) + // Changed this from using --private-key to supplying -e ansible_ssh_private_key as the latter + // is treated as a highest priority variable, and thus prevents overriding by dynamic variables + // as seen in #5852 + // args = append(args, "--private-key", privKeyFile) + args = append(args, "-e", fmt.Sprintf("ansible_ssh_private_key_file=%s", privKeyFile)) } args = append(args, p.config.ExtraArguments...) 
if len(p.config.AnsibleEnvVars) > 0 { From d3c7d43f20236706414c735012280e72baf38223 Mon Sep 17 00:00:00 2001 From: Marc Mercer Date: Wed, 7 Feb 2018 19:47:01 -0800 Subject: [PATCH 198/251] Small typo correction --- provisioner/ansible/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/ansible/provisioner.go b/provisioner/ansible/provisioner.go index 4992b532c..2984e496a 100644 --- a/provisioner/ansible/provisioner.go +++ b/provisioner/ansible/provisioner.go @@ -327,7 +327,7 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator, pri p.config.PackerBuildName, p.config.PackerBuilderType), "-i", inventory, playbook} if len(privKeyFile) > 0 { - // Changed this from using --private-key to supplying -e ansible_ssh_private_key as the latter + // Changed this from using --private-key to supplying -e ansible_ssh_private_key_file as the latter // is treated as a highest priority variable, and thus prevents overriding by dynamic variables // as seen in #5852 // args = append(args, "--private-key", privKeyFile) From 22666153f9b2161adb9929d2547ae1901adf6e84 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 7 Feb 2018 20:16:15 -0800 Subject: [PATCH 199/251] Add `winrm_no_proxy` option. Setting this adds the remote host:ip to the `NO_PROXY` environment variable. 
--- helper/communicator/config.go | 9 ++++--- helper/communicator/step_connect_test.go | 24 +++++++++++++++++ helper/communicator/step_connect_winrm.go | 26 +++++++++++++++++++ .../docs/templates/communicator.html.md | 15 +++++++---- 4 files changed, 65 insertions(+), 9 deletions(-) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index f2664b192..52b81a3a6 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -41,14 +41,15 @@ type Config struct { SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"` // WinRM - WinRMUser string `mapstructure:"winrm_username"` - WinRMPassword string `mapstructure:"winrm_password"` WinRMHost string `mapstructure:"winrm_host"` + WinRMInsecure bool `mapstructure:"winrm_insecure"` + WinRMNoProxy bool `mapstructure:"winrm_no_proxy"` + WinRMPassword string `mapstructure:"winrm_password"` WinRMPort int `mapstructure:"winrm_port"` WinRMTimeout time.Duration `mapstructure:"winrm_timeout"` - WinRMUseSSL bool `mapstructure:"winrm_use_ssl"` - WinRMInsecure bool `mapstructure:"winrm_insecure"` WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"` + WinRMUseSSL bool `mapstructure:"winrm_use_ssl"` + WinRMUser string `mapstructure:"winrm_username"` WinRMTransportDecorator func() winrm.Transporter } diff --git a/helper/communicator/step_connect_test.go b/helper/communicator/step_connect_test.go index fb61f6463..6db32fba5 100644 --- a/helper/communicator/step_connect_test.go +++ b/helper/communicator/step_connect_test.go @@ -3,10 +3,12 @@ package communicator import ( "bytes" "context" + "os" "testing" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" + "github.com/stretchr/testify/assert" ) func TestStepConnect_impl(t *testing.T) { @@ -29,6 +31,28 @@ func TestStepConnect_none(t *testing.T) { } } +var noProxyTests = []struct { + current string + expected string +}{ + {"", "foo:1"}, + {"foo:1", "foo:1"}, + {"foo:1,bar:2", "foo:1,bar:2"}, + {"bar:2", 
"bar:2,foo:1"}, +} + +func TestStepConnect_setNoProxy(t *testing.T) { + key := "NO_PROXY" + for _, tt := range noProxyTests { + if value := os.Getenv(key); value != "" { + os.Unsetenv(key) + defer func() { os.Setenv(key, value) }() + os.Setenv(key, tt.current) + assert.Equal(t, tt.expected, os.Getenv(key), "env not set correctly.") + } + } +} + func testState(t *testing.T) multistep.StateBag { state := new(multistep.BasicStateBag) state.Put("hook", &packer.MockHook{}) diff --git a/helper/communicator/step_connect_winrm.go b/helper/communicator/step_connect_winrm.go index 06e6236f8..cdda73a5a 100644 --- a/helper/communicator/step_connect_winrm.go +++ b/helper/communicator/step_connect_winrm.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "log" + "os" "strings" "time" @@ -128,6 +129,12 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan } } + if s.Config.WinRMNoProxy { + if err := setNoProxy(host, port); err != nil { + return nil, fmt.Errorf("Error setting no_proxy: %s", err) + } + } + log.Println("[INFO] Attempting WinRM connection...") comm, err = winrm.New(&winrm.Config{ Host: host, @@ -182,3 +189,22 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan return comm, nil } + +// setNoProxy configures the $NO_PROXY env var +func setNoProxy(host string, port int) error { + current := os.Getenv("NO_PROXY") + p := fmt.Sprintf("%s:%d", host, port) + // not set + // set + // is set and not contains + // set + // is set and contains + if current == "" { + return os.Setenv("NO_PROXY", p) + } + if !strings.Contains(current, p) { + return os.Setenv("NO_PROXY", strings.Join([]string{current, p}, ",")) + } + return nil + +} diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 0165ab5a1..db768a90a 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -139,16 +139,21 @@ The WinRM 
communicator has the following options. - `winrm_password` (string) - The password to use to connect to WinRM. +- `winrm_insecure` (boolean) - If true, do not check server certificate + chain and host name + +* `winrm_no_proxy` (boolean) - Setting this to `true` adds the remote + `host:port` to the `NO_PROXY` environment variable. This has the effect of + bypassing any configured proxies when connecting to the remote host. + Defaults to `false`. + - `winrm_timeout` (string) - The amount of time to wait for WinRM to become available. This defaults to "30m" since setting up a Windows machine generally takes a long time. -- `winrm_use_ssl` (boolean) - If true, use HTTPS for WinRM - -- `winrm_insecure` (boolean) - If true, do not check server certificate - chain and host name - - `winrm_use_ntlm` (boolean) - If true, NTLM authentication will be used for WinRM, rather than default (basic authentication), removing the requirement for basic authentication to be enabled within the target guest. Further reading for remote connection authentication can be found [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx). + +- `winrm_use_ssl` (boolean) - If true, use HTTPS for WinRM From 478589abec270dd76afea0abff85aa1ddcbf76ca Mon Sep 17 00:00:00 2001 From: DanHam Date: Wed, 25 Oct 2017 13:50:58 +0100 Subject: [PATCH 200/251] Extend upload and subsequent 'dot sourcing' of env vars to std PS command * Wrap funcs to flatten and upload env vars with new func prepareEnvVars. While the wrapped funcs could be combined, keeping them separate simplifies testing. * Configure/refactor std and elevated PS to use new funcs to prepare, upload and dot source env vars. * Dot sourcing the env vars in this way avoids the need to embed them directly in the command string. This avoids the need to escape the env vars to ensure the command string is correctly parsed.
* Characters within the env vars that are special to PS (such as $'s and backticks) will still need to be escaped to allow them to be correctly interpreted by PS. * The std and elevated PS commands now inject env vars into the remote env via the same mechanism. This ensures consistent behaviour across the two command types. Fixes #5471 --- provisioner/powershell/provisioner.go | 52 +++++++++++++++++++-------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 4297acf08..8b9b53440 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -113,7 +113,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.EnvVarFormat == "" { - p.config.EnvVarFormat = `$env:%s=\"%s\"; ` + p.config.EnvVarFormat = `$env:%s="%s"; ` } if p.config.ElevatedEnvVarFormat == "" { @@ -121,7 +121,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` + p.config.ExecuteCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. 
{{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` } if p.config.ElevatedExecuteCommand == "" { @@ -331,6 +331,19 @@ func (p *Provisioner) retryable(f func() error) error { } } +// Environment variables required within the remote environment are uploaded within a PS script and +// then enabled by 'dot sourcing' the script immediately prior to execution of the main command +func (p *Provisioner) prepareEnvVars(elevated bool) (envVarPath string, err error) { + // Collate all required env vars into a plain string with required formatting applied + flattenedEnvVars := p.createFlattenedEnvVars(elevated) + // Create a powershell script on the target build fs containing the flattened env vars + envVarPath, err = p.uploadEnvVars(flattenedEnvVars) + if err != nil { + return "", err + } + return +} + func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { flattened = "" envVars := make(map[string]string) @@ -367,6 +380,19 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { return } +func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (envVarPath string, err error) { + // Upload all env vars to a powershell script on the target build file system + envVarReader := strings.NewReader(flattenedEnvVars) + uuid := uuid.TimeOrderedUUID() + envVarPath = fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) + log.Printf("Uploading env vars to %s", envVarPath) + err = p.communicator.Upload(envVarPath, envVarReader, nil) + if err != nil { + return "", fmt.Errorf("Error uploading ps script containing env vars: %s", err) + } + return +} + func (p *Provisioner) createCommandText() (command string, err error) { // Return the interpolated command if p.config.ElevatedUser == "" { @@ -377,12 +403,15 @@ func (p *Provisioner) createCommandText() (command string, err error) { } func (p *Provisioner) createCommandTextNonPrivileged() (command string, err error) { - // Create environment variables to set before executing the
command - flattenedEnvVars := p.createFlattenedEnvVars(false) + // Prepare everything needed to enable the required env vars within the remote environment + envVarPath, err := p.prepareEnvVars(false) + if err != nil { + return "", err + } p.config.ctx.Data = &ExecuteCommandTemplate{ - Vars: flattenedEnvVars, Path: p.config.RemotePath, + Vars: envVarPath, } command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) @@ -395,17 +424,10 @@ func (p *Provisioner) createCommandTextNonPrivileged() (command string, err erro } func (p *Provisioner) createCommandTextPrivileged() (command string, err error) { - // Can't double escape the env vars, lets create shiny new ones - flattenedEnvVars := p.createFlattenedEnvVars(true) - // Need to create a mini ps1 script containing all of the environment variables we want; - // we'll be dot-sourcing this later - envVarReader := strings.NewReader(flattenedEnvVars) - uuid := uuid.TimeOrderedUUID() - envVarPath := fmt.Sprintf(`${env:SYSTEMROOT}\Temp\packer-env-vars-%s.ps1`, uuid) - log.Printf("Uploading env vars to %s", envVarPath) - err = p.communicator.Upload(envVarPath, envVarReader, nil) + // Prepare everything needed to enable the required env vars within the remote environment + envVarPath, err := p.prepareEnvVars(true) if err != nil { - return "", fmt.Errorf("Error preparing elevated powershell script: %s", err) + return "", err } p.config.ctx.Data = &ExecuteCommandTemplate{ From a7b118ed94791a96056992398db8257d1a779a38 Mon Sep 17 00:00:00 2001 From: DanHam Date: Wed, 25 Oct 2017 22:47:08 +0100 Subject: [PATCH 201/251] Fix tests post changes. Add test for upload func. 
--- provisioner/powershell/provisioner_test.go | 85 +++++++++++++++------- 1 file changed, 58 insertions(+), 27 deletions(-) diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index e7e64d52e..749564d4d 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -79,8 +79,8 @@ func TestProvisionerPrepare_Defaults(t *testing.T) { t.Error("expected elevated_password to be empty") } - if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"` { - t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) + if p.config.ExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"` { + t.Fatalf(`Default command should be 'powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}';exit $LastExitCode }"', but got '%s'`, p.config.ExecuteCommand) } if p.config.ElevatedExecuteCommand != `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. 
{{.Vars}}; &'{{.Path}}'; exit $LastExitCode }"` { @@ -403,7 +403,7 @@ func TestProvisionerProvision_Inline(t *testing.T) { ui := testUi() p := new(Provisioner) - // Defaults provided by Packer + // Defaults provided by Packer - env vars should not appear in cmd p.config.PackerBuildName = "vmware" p.config.PackerBuilderType = "iso" comm := new(packer.MockCommunicator) @@ -413,11 +413,14 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } + // User supplied env vars should not change things envVars := make([]string, 2) envVars[0] = "FOO=BAR" envVars[1] = "BAR=BAZ" @@ -430,9 +433,11 @@ func TestProvisionerProvision_Inline(t *testing.T) { t.Fatal("should not have error") } - expectedCommand = `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; &'c:/Windows/Temp/inlineScript.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd = comm.StartCmd.Command + re = regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/inlineScript.ps1';exit \$LastExitCode }"`) + matched = re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } } @@ -455,11 +460,12 @@ func TestProvisionerProvision_Scripts(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } - } func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { @@ -488,9 +494,11 @@ func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { t.Fatal("should not have error") } - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; &'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - if comm.StartCmd.Command != expectedCommand { - t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + cmd := comm.StartCmd.Command + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. 
\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } } @@ -547,11 +555,11 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO==bar"}, // User env var with value starting with equals } expected := []string{ - `$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:BAZ=\"qux\"; $env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"bar=baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, - `$env:FOO=\"=bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; `, + `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, } p := new(Provisioner) @@ -571,7 +579,6 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { } func TestProvision_createCommandText(t *testing.T) { - config := testConfig() config["remote_path"] = "c:/Windows/Temp/script.ps1" p := new(Provisioner) @@ -586,22 +593,46 @@ func TestProvision_createCommandText(t *testing.T) { // Non-elevated cmd, _ := p.createCommandText() - expectedCommand := `powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; 
&'c:/Windows/Temp/script.ps1';exit $LastExitCode }"` - - if cmd != expectedCommand { - t.Fatalf("Expected Non-elevated command: %s, got %s", expectedCommand, cmd) + re := regexp.MustCompile(`powershell -executionpolicy bypass "& { if \(Test-Path variable:global:ProgressPreference\){\$ProgressPreference='SilentlyContinue'};\. \${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1; &'c:/Windows/Temp/script.ps1';exit \$LastExitCode }"`) + matched := re.MatchString(cmd) + if !matched { + t.Fatalf("Got unexpected command: %s", cmd) } // Elevated p.config.ElevatedUser = "vagrant" p.config.ElevatedPassword = "vagrant" cmd, _ = p.createCommandText() - matched, _ := regexp.MatchString("powershell -executionpolicy bypass -file \"%TEMP%(.{1})packer-elevated-shell.*", cmd) + re = regexp.MustCompile(`powershell -executionpolicy bypass -file "%TEMP%\\packer-elevated-shell-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1"`) + matched = re.MatchString(cmd) if !matched { t.Fatalf("Got unexpected elevated command: %s", cmd) } } +func TestProvision_uploadEnvVars(t *testing.T) { + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.communicator = comm + + flattenedEnvVars := `$env:PACKER_BUILDER_TYPE="footype"; $env:PACKER_BUILD_NAME="foobuild";` + + envVarPath, err := p.uploadEnvVars(flattenedEnvVars) + if err != nil { + t.Fatalf("Did not expect error: %s", err.Error()) + } + + if comm.UploadCalled != true { + t.Fatalf("Failed to upload env var file") + } + + re := regexp.MustCompile(`\${env:SYSTEMROOT}\\Temp\\packer-env-vars-[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}\.ps1`) + matched := re.MatchString(envVarPath) + if !matched { + t.Fatalf("Got unexpected path for env var file: %s", envVarPath) + } +} + func TestProvision_generateElevatedShellRunner(t *testing.T) { // Non-elevated From 2b1aa0458340692fe755ef6423ea6911573c107e Mon Sep 17 
00:00:00 2001 From: DanHam Date: Wed, 7 Feb 2018 15:47:21 +0000 Subject: [PATCH 202/251] Update docs to reflect new upload and dot source of env var for std ps cmd --- .../docs/provisioners/powershell.html.md | 44 ++++++++++--------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/website/source/docs/provisioners/powershell.html.md b/website/source/docs/provisioners/powershell.html.md index f085a6e82..1eabd848c 100644 --- a/website/source/docs/provisioners/powershell.html.md +++ b/website/source/docs/provisioners/powershell.html.md @@ -29,20 +29,21 @@ The example below is fully functional. ## Configuration Reference The reference of available configuration options is listed below. The only -required element is either "inline" or "script". Every other option is optional. +required element is either "inline" or "script". Every other option is +optional. Exactly *one* of the following is required: - `inline` (array of strings) - This is an array of commands to execute. The - commands are concatenated by newlines and turned into a single file, so they - are all executed within the same context. This allows you to change + commands are concatenated by newlines and turned into a single file, so + they are all executed within the same context. This allows you to change directories in one command and use something in the directory in the next and so on. Inline scripts are the easiest way to pull off simple tasks within the machine. - `script` (string) - The path to a script to upload and execute in - the machine. This path can be absolute or relative. If it is relative, it is - relative to the working directory when Packer is executed. + the machine. This path can be absolute or relative. If it is relative, it + is relative to the working directory when Packer is executed. - `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. 
Each script is @@ -51,12 +52,12 @@ Exactly *one* of the following is required: Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary files, - and Packer should therefore not convert Windows line endings to Unix line - endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary + files, and Packer should therefore not convert Windows line endings to Unix + line endings (if there are any). By default this is false. -- `elevated_execute_command` (string) - The command to use to execute the elevated - script. By default this is as follows: +- `elevated_execute_command` (string) - The command to use to execute the + elevated script. By default this is as follows: ``` powershell powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" @@ -65,32 +66,34 @@ Optional parameters: The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the location of a temp file containing the list of `environment_vars`, if configured. + `Vars`, which is the location of a temp file containing the list of + `environment_vars`, if configured. - `environment_vars` (array of strings) - An array of key/value pairs to inject prior to the execute\_command. The format should be `key=value`. - Packer injects some environmental variables by default into the environment, - as well, which are covered in the section below. + Packer injects some environmental variables by default into the + environment, as well, which are covered in the section below. - `execute_command` (string) - The command to use to execute the script. 
By default this is as follows: ``` powershell - powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};{{.Vars}}&'{{.Path}}';exit $LastExitCode }" + powershell -executionpolicy bypass "& { if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'};. {{.Vars}}; &'{{.Path}}'; exit $LastExitCode }" ``` The value of this is treated as [configuration template](/docs/templates/engine.html). There are two available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. + `Vars`, which is the location of a temp file containing the list of + `environment_vars`, if configured. - `elevated_user` and `elevated_password` (string) - If specified, the PowerShell script will be run with elevated privileges using the given Windows user. - `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must be a - writable location and any parent directories must already exist. + the machine. This defaults to "c:/Windows/Temp/script.ps1". This value must + be a writable location and any parent directories must already exist. - `start_retry_timeout` (string) - The amount of time to attempt to *start* the remote process. By default this is "5m" or 5 minutes. This setting @@ -111,9 +114,10 @@ commonly useful environmental variables: This is most useful when Packer is making multiple builds and you want to distinguish them slightly from a common provisioning script. -- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the - machine that the script is running on. This is useful if you want to run - only certain parts of the script on systems built with certain builders. +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create + the machine that the script is running on. 
This is useful if you want to + run only certain parts of the script on systems built with certain + builders. - `PACKER_HTTP_ADDR` If using a builder that provides an http server for file transfer (such as hyperv, parallels, qemu, virtualbox, and vmware), this From e982bc4ea5af42d66c34dd56fa511c83602b3431 Mon Sep 17 00:00:00 2001 From: DanHam Date: Wed, 7 Feb 2018 15:51:58 +0000 Subject: [PATCH 203/251] Fix copy/paste of description from shell provisioner --- website/source/docs/provisioners/powershell.html.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/provisioners/powershell.html.md b/website/source/docs/provisioners/powershell.html.md index 1eabd848c..ae897489f 100644 --- a/website/source/docs/provisioners/powershell.html.md +++ b/website/source/docs/provisioners/powershell.html.md @@ -1,8 +1,8 @@ --- description: | - The shell Packer provisioner provisions machines built by Packer using shell - scripts. Shell provisioning is the easiest way to get software installed and - configured on a machine. + The PowerShell Packer provisioner runs PowerShell scripts on Windows + machines. + It assumes that the communicator in use is WinRM. 
layout: docs page_title: 'PowerShell - Provisioners' sidebar_current: 'docs-provisioners-powershell' From aaf7102b9a7a5ba71e79ee5f78e7629858944568 Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 21 Sep 2017 23:26:14 +0100 Subject: [PATCH 204/251] Tests for escape of chars special to PowerShell in user supplied data --- provisioner/powershell/provisioner_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go index 749564d4d..2791601fa 100644 --- a/provisioner/powershell/provisioner_test.go +++ b/provisioner/powershell/provisioner_test.go @@ -518,6 +518,12 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { {"FOO=bar", "BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals + // Test escaping of characters special to PowerShell + {"FOO=bar$baz"}, // User env var with value containing dollar + {"FOO=bar\"baz"}, // User env var with value containing a double quote + {"FOO=bar'baz"}, // User env var with value containing a single quote + {"FOO=bar`baz"}, // User env var with value containing a backtick + } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -525,6 +531,10 @@ func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; 
$env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) @@ -553,6 +563,11 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { {"FOO=bar", "BAZ=qux"}, // Multiple user env vars {"FOO=bar=baz"}, // User env var with value containing equals {"FOO==bar"}, // User env var with value starting with equals + // Test escaping of characters special to PowerShell + {"FOO=bar$baz"}, // User env var with value containing dollar + {"FOO=bar\"baz"}, // User env var with value containing a double quote + {"FOO=bar'baz"}, // User env var with value containing a single quote + {"FOO=bar`baz"}, // User env var with value containing a backtick } expected := []string{ `$env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, @@ -560,6 +575,10 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { `$env:BAZ="qux"; $env:FOO="bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="bar=baz"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, `$env:FOO="=bar"; $env:PACKER_BUILDER_TYPE="iso"; $env:PACKER_BUILD_NAME="vmware"; `, + "$env:FOO=\"bar`$baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`\"baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar`'baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", + "$env:FOO=\"bar``baz\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; ", } p := new(Provisioner) From 2d830d5d43c5136ee7498fe297cd48d0b17ae59b Mon Sep 17 00:00:00 2001 From: DanHam Date: Sun, 15 Oct 2017 13:28:59 +0100 Subject: [PATCH 205/251] Auto escape chars special to PowerShell in user supplied data --- provisioner/powershell/provisioner.go | 34 +++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git 
a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 8b9b53440..92f881314 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -24,6 +24,13 @@ import ( var retryableSleep = 2 * time.Second +var psEscape = strings.NewReplacer( + "$", "`$", + "\"", "`\"", + "`", "``", + "'", "`'", +) + type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -359,7 +366,13 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { // Split vars into key/value components for _, envVar := range p.config.Vars { keyValue := strings.SplitN(envVar, "=", 2) - envVars[keyValue[0]] = keyValue[1] + // Escape chars special to PS in each env var value + escapedEnvVarValue := psEscape.Replace(keyValue[1]) + if escapedEnvVarValue != keyValue[1] { + log.Printf("Env var %s converted to %s after escaping chars special to PS", keyValue[1], + escapedEnvVarValue) + } + envVars[keyValue[0]] = escapedEnvVarValue } // Create a list of env var keys in sorted order @@ -480,13 +493,26 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin } escapedCommand := buffer.String() log.Printf("Command [%s] converted to [%s] for use in XML string", command, escapedCommand) - buffer.Reset() + // Escape chars special to PowerShell in the ElevatedUser string + escapedElevatedUser := psEscape.Replace(p.config.ElevatedUser) + if escapedElevatedUser != p.config.ElevatedUser { + log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", + p.config.ElevatedUser, escapedElevatedUser) + } + + // Escape chars special to PowerShell in the ElevatedPassword string + escapedElevatedPassword := psEscape.Replace(p.config.ElevatedPassword) + if escapedElevatedPassword != p.config.ElevatedPassword { + log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", + p.config.ElevatedPassword, escapedElevatedPassword) + } + // Generate 
command err = elevatedTemplate.Execute(&buffer, elevatedOptions{ - User: p.config.ElevatedUser, - Password: p.config.ElevatedPassword, + User: escapedElevatedUser, + Password: escapedElevatedPassword, TaskName: taskName, TaskDescription: "Packer elevated task", LogFile: logFile, From 4cc078256d4bcbde8874641a39a3aa58ca067dc2 Mon Sep 17 00:00:00 2001 From: DanHam Date: Mon, 30 Oct 2017 00:14:06 +0000 Subject: [PATCH 206/251] Fixer for templates affected by auto escape of special powershell chars --- fix/fixer.go | 2 + fix/fixer_powershell_escapes.go | 73 +++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 fix/fixer_powershell_escapes.go diff --git a/fix/fixer.go b/fix/fixer.go index 5ca0b3a18..d5622b299 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -34,6 +34,7 @@ func init() { "amazon-shutdown_behavior": new(FixerAmazonShutdownBehavior), "amazon-enhanced-networking": new(FixerAmazonEnhancedNetworking), "docker-email": new(FixerDockerEmail), + "powershell-escapes": new(FixerPowerShellEscapes), } FixerOrder = []string{ @@ -51,5 +52,6 @@ func init() { "amazon-shutdown_behavior", "amazon-enhanced-networking", "docker-email", + "powershell-escapes", } } diff --git a/fix/fixer_powershell_escapes.go b/fix/fixer_powershell_escapes.go new file mode 100644 index 000000000..9da9ae91f --- /dev/null +++ b/fix/fixer_powershell_escapes.go @@ -0,0 +1,73 @@ +package fix + +import ( + "github.com/mitchellh/mapstructure" + "strings" +) + +// FixerPowerShellEscapes removes the PowerShell escape character from user +// environment variables and elevated username and password strings +type FixerPowerShellEscapes struct{} + +func (FixerPowerShellEscapes) Fix(input map[string]interface{}) (map[string]interface{}, error) { + type template struct { + Provisioners []interface{} + } + + var psUnescape = strings.NewReplacer( + "`$", "$", + "`\"", "\"", + "``", "`", + "`'", "'", + ) + + // Decode the input into our structure, if we can + var tpl template + if 
err := mapstructure.WeakDecode(input, &tpl); err != nil { + return nil, err + } + + for i, raw := range tpl.Provisioners { + var provisioners map[string]interface{} + if err := mapstructure.Decode(raw, &provisioners); err != nil { + // Ignore errors, could be a non-map + continue + } + + if ok := provisioners["type"] == "powershell"; !ok { + continue + } + + if _, ok := provisioners["elevated_user"]; ok { + provisioners["elevated_user"] = psUnescape.Replace(provisioners["elevated_user"].(string)) + } + if _, ok := provisioners["elevated_password"]; ok { + provisioners["elevated_password"] = psUnescape.Replace(provisioners["elevated_password"].(string)) + } + if raw, ok := provisioners["environment_vars"]; ok { + var env_vars []string + if err := mapstructure.Decode(raw, &env_vars); err != nil { + continue + } + env_vars_unescaped := make([]interface{}, len(env_vars)) + for j, env_var := range env_vars { + env_vars_unescaped[j] = psUnescape.Replace(env_var) + } + // Replace with unescaped environment variables + provisioners["environment_vars"] = env_vars_unescaped + } + + // Write all changes back to template + tpl.Provisioners[i] = provisioners + } + + if len(tpl.Provisioners) > 0 { + input["provisioners"] = tpl.Provisioners + } + + return input, nil +} + +func (FixerPowerShellEscapes) Synopsis() string { + return `Removes PowerShell escapes from user env vars and elevated username and password strings` +} From 6559a26c1106bbd689c63aa8797ec86354e349ae Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 8 Feb 2018 18:03:44 +0000 Subject: [PATCH 207/251] Update Windows build demo script and template to reflect new PS behaviour --- .../intro/getting-started/build-image.html.md | 65 ++++++++++++------- 1 file changed, 43 insertions(+), 22 deletions(-) diff --git a/website/source/intro/getting-started/build-image.html.md b/website/source/intro/getting-started/build-image.html.md index ba0683797..273618294 100644 --- 
a/website/source/intro/getting-started/build-image.html.md +++ b/website/source/intro/getting-started/build-image.html.md @@ -448,14 +448,19 @@ variables we will set in our build template; copy the contents into your own `sample_script.ps1` and provide the path to it in your build template: ```powershell -Write-Host "PACKER_BUILD_NAME is automatically set for you, " -NoNewline -Write-Host "or you can set it in your builder variables; " -NoNewline +Write-Host "PACKER_BUILD_NAME is an env var Packer automatically sets for you." +Write-Host "...or you can set it in your builder variables." Write-Host "The default for this builder is:" $Env:PACKER_BUILD_NAME -Write-Host "Use backticks as the escape character when required in powershell:" +Write-Host "The PowerShell provisioner will automatically escape characters" +Write-Host "considered special to PowerShell when it encounters them in" +Write-Host "your environment variables or in the PowerShell elevated" +Write-Host "username/password fields." 
Write-Host "For example, VAR1 from our config is:" $Env:VAR1 Write-Host "Likewise, VAR2 is:" $Env:VAR2 -Write-Host "Finally, VAR3 is:" $Env:VAR3 +Write-Host "VAR3 is:" $Env:VAR3 +Write-Host "Finally, VAR4 is:" $Env:VAR4 +Write-Host "None of the special characters needed escaping in the template" ``` Finally, we need to create the actual [build template]( @@ -514,7 +519,12 @@ customize and control the build process: { "type": "powershell", "environment_vars": ["DEVOPS_LIFE_IMPROVER=PACKER"], - "inline": "Write-Host \"HELLO NEW USER; WELCOME TO $Env:DEVOPS_LIFE_IMPROVER\"" + "inline": [ + "Write-Host \"HELLO NEW USER; WELCOME TO $Env:DEVOPS_LIFE_IMPROVER\"", + "Write-Host \"You need to use backtick escapes when using\"", + "Write-Host \"characters such as DOLLAR`$ directly in a command\"", + "Write-Host \"or in your own scripts.\"" + ] }, { "type": "windows-restart" @@ -523,9 +533,10 @@ customize and control the build process: "script": "./sample_script.ps1", "type": "powershell", "environment_vars": [ - "VAR1=A`$Dollar", - "VAR2=A``Backtick", - "VAR3=A`'SingleQuote" + "VAR1=A$Dollar", + "VAR2=A`Backtick", + "VAR3=A'SingleQuote", + "VAR4=A\"DoubleQuote" ] } ] @@ -550,39 +561,49 @@ You should see output like this: ``` amazon-ebs output will be in this color. -==> amazon-ebs: Prevalidating AMI Name: packer-demo-1507933843 - amazon-ebs: Found Image ID: ami-23d93c59 -==> amazon-ebs: Creating temporary keypair: packer_59e13e94-203a-1bca-5327-bebf0d5ad15a -==> amazon-ebs: Creating temporary security group for this instance: packer_59e13ea9-3220-8dab-29c0-ed7f71e221a1 +==> amazon-ebs: Prevalidating AMI Name: packer-demo-1518111383 + amazon-ebs: Found Image ID: ami-013e197b +==> amazon-ebs: Creating temporary keypair: packer_5a7c8a97-f27f-6708-cc3c-6ab9b4688b13 +==> amazon-ebs: Creating temporary security group for this instance: packer_5a7c8ab5-444c-13f2-0aa1-18d124cdb975 ==> amazon-ebs: Authorizing access to port 5985 from 0.0.0.0/0 in the temporary security group... 
==> amazon-ebs: Launching a source AWS instance... ==> amazon-ebs: Adding tags to source instance amazon-ebs: Adding tag: "Name": "Packer Builder" - amazon-ebs: Instance ID: i-0349406ac85f02166 -==> amazon-ebs: Waiting for instance (i-0349406ac85f02166) to become ready... + amazon-ebs: Instance ID: i-0c8c808a3b945782a +==> amazon-ebs: Waiting for instance (i-0c8c808a3b945782a) to become ready... ==> amazon-ebs: Skipping waiting for password since WinRM password set... ==> amazon-ebs: Waiting for WinRM to become available... amazon-ebs: WinRM connected. ==> amazon-ebs: Connected to WinRM! ==> amazon-ebs: Provisioning with Powershell... -==> amazon-ebs: Provisioning with powershell script: /var/folders/15/d0f7gdg13rnd1cxp7tgmr55c0000gn/T/packer-powershell-provisioner175214995 +==> amazon-ebs: Provisioning with powershell script: /var/folders/15/d0f7gdg13rnd1cxp7tgmr55c0000gn/T/packer-powershell-provisioner943573503 amazon-ebs: HELLO NEW USER; WELCOME TO PACKER + amazon-ebs: You need to use backtick escapes when using + amazon-ebs: characters such as DOLLAR$ directly in a command + amazon-ebs: or in your own scripts. ==> amazon-ebs: Restarting Machine ==> amazon-ebs: Waiting for machine to restart... - amazon-ebs: WIN-TEM0TDL751M restarted. + amazon-ebs: WIN-NI8N45RPJ23 restarted. ==> amazon-ebs: Machine successfully restarted, moving on ==> amazon-ebs: Provisioning with Powershell... ==> amazon-ebs: Provisioning with powershell script: ./sample_script.ps1 - amazon-ebs: PACKER_BUILD_NAME is automatically set for you, or you can set it in your builder variables; The default for this builder is: amazon-ebs - amazon-ebs: Use backticks as the escape character when required in powershell: + amazon-ebs: PACKER_BUILD_NAME is an env var Packer automatically sets for you. + amazon-ebs: ...or you can set it in your builder variables. 
+ amazon-ebs: The default for this builder is: amazon-ebs + amazon-ebs: The PowerShell provisioner will automatically escape characters + amazon-ebs: considered special to PowerShell when it encounters them in + amazon-ebs: your environment variables or in the PowerShell elevated + amazon-ebs: username/password fields. amazon-ebs: For example, VAR1 from our config is: A$Dollar amazon-ebs: Likewise, VAR2 is: A`Backtick - amazon-ebs: Finally, VAR3 is: A'SingleQuote + amazon-ebs: VAR3 is: A'SingleQuote + amazon-ebs: Finally, VAR4 is: A"DoubleQuote + amazon-ebs: None of the special characters needed escaping in the template ==> amazon-ebs: Stopping the source instance... amazon-ebs: Stopping instance, attempt 1 ==> amazon-ebs: Waiting for the instance to stop... -==> amazon-ebs: Creating the AMI: packer-demo-1507933843 - amazon-ebs: AMI: ami-100fc56a +==> amazon-ebs: Creating the AMI: packer-demo-1518111383 + amazon-ebs: AMI: ami-f0060c8a ==> amazon-ebs: Waiting for AMI to become ready... ==> amazon-ebs: Terminating the source AWS instance... ==> amazon-ebs: Cleaning up any extra volumes... @@ -593,7 +614,7 @@ Build 'amazon-ebs' finished. ==> Builds finished. 
The artifacts of successful builds are: --> amazon-ebs: AMIs were created: -us-east-1: ami-100fc56a +us-east-1: ami-f0060c8a ``` And if you navigate to your EC2 dashboard you should see your shiny new AMI From 472a9226423cfa0d6c2fd709c2d0ab32b38a5f6d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 11:47:28 -0800 Subject: [PATCH 208/251] clean up ncloud docs --- website/source/docs/builders/ncloud.html.md | 96 ++++++++++++++------- website/source/layouts/docs.erb | 3 + 2 files changed, 70 insertions(+), 29 deletions(-) diff --git a/website/source/docs/builders/ncloud.html.md b/website/source/docs/builders/ncloud.html.md index ce38a9c71..9148ae568 100644 --- a/website/source/docs/builders/ncloud.html.md +++ b/website/source/docs/builders/ncloud.html.md @@ -1,7 +1,6 @@ --- description: | - As Packer allows users to develop a custom builder as a plugin, NAVER CLOUD PLATFORM provides its own Packer builder for your convenience. -You can use NAVER CLOUD PLATFORM's Packer builder to easily create your server images. + The ncloud builder allows you to create server images using the NAVER Cloud Platform. layout: docs page_title: 'Naver Cloud Platform - Builders' sidebar_current: 'docs-builders-ncloud' @@ -9,10 +8,61 @@ sidebar_current: 'docs-builders-ncloud' # NAVER CLOUD PLATFORM Builder -As Packer allows users to develop a custom builder as a plugin, NAVER CLOUD PLATFORM provides its own Packer builder for your convenience. -You can use NAVER CLOUD PLATFORM's Packer builder to easily create your server images. +The `ncloud` builder allows you to create server images using the [NAVER Cloud +Platform](https://www.ncloud.com/). -#### Sample code of template.json +## Configuration Reference + +### Required: + +- `ncloud_access_key` (string) - User's access key. Go to [\[Account + Management \> Authentication + Key\]](https://www.ncloud.com/mypage/manage/authkey) to create and view + your authentication key. 
+ +- `ncloud_secret_key` (string) - User's secret key paired with the access + key. Go to [\[Account Management \> Authentication + Key\]](https://www.ncloud.com/mypage/manage/authkey) to create and view + your authentication key. + +- `server_image_product_code` (string) - Product code of an image to create. + (member\_server\_image\_no is required if not specified) + +- `server_product_code` (string) - Product (spec) code to create. + +### Optional: + +- `member_server_image_no` (string) - Previous image code. If there is an + image previously created, it can be used to create a new image. + (`server_image_product_code` is required if not specified) + +- `server_image_name` (string) - Name of an image to create. + +- `server_image_description` (string) - Description of an image to create. + +- `block_storage_size` (number) - You can add block storage ranging from 10 + GB to 2000 GB, in increments of 10 GB. + +- `access_control_group_configuration_no` (string) - This is used to allow + winrm access when you create a Windows server. An ACG that specifies an + access source (`0.0.0.0/0`) and allowed port (5985) must be created in + advance. + +- `user_data` (string) - Init script to run when an instance is created. + - For Linux servers, Python, Perl, and Shell scripts can be used. The + path of the script to run should be included at the beginning of the + script, like \#!/usr/bin/env python, \#!/bin/perl, or \#!/bin/bash. + - For Windows servers, only Visual Basic scripts can be used. + - All scripts must be written in English. +- `user_data_file` (string) - A path to a file containing a `user_data` + script. See above for more information. + +- `region` (string) - Name of the region where you want to create an image. 
+ (default: Korea) + - values: Korea / US-West / HongKong / Singapore / Japan / Germany + + +## Sample code of template.json ``` { @@ -38,28 +88,10 @@ You can use NAVER CLOUD PLATFORM's Packer builder to easily create your server i } ``` -#### Description +## Requirements for creating Windows images -* type(required): "ncloud" -* ncloud_access_key (required): User's access key. Go to [[Account Management > Authentication Key]](https://www.ncloud.com/mypage/manage/authkey) to create and view your authentication key. -* ncloud_secret_key (required): User's secret key paired with the access key. Go to [[Account Management > Authentication Key]](https://www.ncloud.com/mypage/manage/authkey) to create and view your authentication key. -* server_image_product_code: Product code of an image to create. (member_server_image_no is required if not specified) -* server_product_code (required): Product (spec) code to create. -* member_server_image_no: Previous image code. If there is an image previously created, it can be used to create a new image. (server_image_product_code is required if not specified) -* server_image_name (option): Name of an image to create. -* server_image_description (option): Description of an image to create. -* block_storage_size (option): You can add block storage ranging from 10 GB to 2000 GB, in increments of 10 GB. -* access_control_group_configuration_no: This is used to allow winrm access when you create a Windows server. An ACG that specifies an access source ("0.0.0.0/0") and allowed port (5985) must be created in advance. -* user_data (option): Init script to run when an instance is created. - * For Linux servers, Python, Perl, and Shell scripts can be used. The path of the script to run should be included at the beginning of the script, like #!/usr/bin/env python, #!/bin/perl, or #!/bin/bash. - * For Windows servers, only Visual Basic scripts can be used. - * All scripts must be written in English. 
-* region (option): Name of the region where you want to create an image. (default: Korea) - * values: Korea / US-West / HongKong / Singapore / Japan / Germany - -### Requirements for creating Windows images - -You should include the following code in the packer configuration file for provision when creating a Windows server. +You should include the following code in the packer configuration file for +provision when creating a Windows server. ``` "builders": [ @@ -82,7 +114,13 @@ You should include the following code in the packer configuration file for provi ] ``` -### Note +## Note -* You can only create as many public IP addresses as the number of server instances you own. Before running Packer, please make sure that the number of public IP addresses previously created is not larger than the number of server instances (including those to be used to create server images). -* When you forcibly terminate the packer process or close the terminal (command) window where the process is running, the resources may not be cleaned up as the packer process no longer runs. In this case, you should manually clean up the resources associated with the process. +* You can only create as many public IP addresses as the number of server + instances you own. Before running Packer, please make sure that the number of + public IP addresses previously created is not larger than the number of + server instances (including those to be used to create server images). +* When you forcibly terminate the packer process or close the terminal + (command) window where the process is running, the resources may not be + cleaned up as the packer process no longer runs. In this case, you should + manually clean up the resources associated with the process. 
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 8470e0d35..7f12d326c 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -122,6 +122,9 @@ > LXD + > + Naver + > Null From 57faecbdd441869339bfd542431d21579abe3431 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 11:54:02 -0800 Subject: [PATCH 209/251] add @YuSungDuk as codeowner for ncloud --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index c037a38d4..050f4d084 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -13,6 +13,7 @@ /builder/oracle/ @prydie @owainlewis /builder/profitbricks/ @jasmingacic /builder/triton/ @jen20 @sean- +/builder/ncloud/ @YuSungDuk # provisioners From 7e36cfcff1e09470a3919d9fea08a7cd151fe960 Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Thu, 6 Apr 2017 11:19:17 +0200 Subject: [PATCH 210/251] Add Scaleway builder --- builder/scaleway/artifact.go | 49 ++++ builder/scaleway/artifact_test.go | 33 +++ builder/scaleway/builder.go | 89 +++++++ builder/scaleway/builder_test.go | 237 ++++++++++++++++++ builder/scaleway/config.go | 103 ++++++++ builder/scaleway/ssh.go | 30 +++ builder/scaleway/step_create_server.go | 61 +++++ builder/scaleway/step_create_ssh_key.go | 88 +++++++ builder/scaleway/step_server_info.go | 44 ++++ builder/scaleway/step_shutdown.go | 43 ++++ builder/scaleway/step_snapshot.go | 48 ++++ builder/scaleway/step_terminate.go | 34 +++ command/plugin.go | 2 + .../vagrant-cloud/post-processor.go | 2 + post-processor/vagrant/post-processor.go | 3 + post-processor/vagrant/scaleway.go | 52 ++++ vendor/vendor.json | 84 +++++++ 17 files changed, 1002 insertions(+) create mode 100644 builder/scaleway/artifact.go create mode 100644 builder/scaleway/artifact_test.go create mode 100644 builder/scaleway/builder.go create mode 100644 builder/scaleway/builder_test.go create mode 100644 builder/scaleway/config.go create mode 100644 builder/scaleway/ssh.go create mode 
100644 builder/scaleway/step_create_server.go create mode 100644 builder/scaleway/step_create_ssh_key.go create mode 100644 builder/scaleway/step_server_info.go create mode 100644 builder/scaleway/step_shutdown.go create mode 100644 builder/scaleway/step_snapshot.go create mode 100644 builder/scaleway/step_terminate.go create mode 100644 post-processor/vagrant/scaleway.go diff --git a/builder/scaleway/artifact.go b/builder/scaleway/artifact.go new file mode 100644 index 000000000..fe1efb96f --- /dev/null +++ b/builder/scaleway/artifact.go @@ -0,0 +1,49 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type Artifact struct { + // The name of the snapshot + snapshotName string + + // The ID of the snapshot + snapshotId string + + // The name of the region + regionName string + + // The client for making API calls + client *api.ScalewayAPI +} + +func (*Artifact) BuilderId() string { + return BuilderId +} + +func (*Artifact) Files() []string { + // No files with Scaleway + return nil +} + +func (a *Artifact) Id() string { + return fmt.Sprintf("%s:%s", a.regionName, a.snapshotId) +} + +func (a *Artifact) String() string { + return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in region '%v'", a.snapshotName, a.snapshotId, a.regionName) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + log.Printf("Destroying image: %s (%s)", a.snapshotId, a.snapshotName) + err := a.client.DeleteSnapshot(a.snapshotId) + return err +} diff --git a/builder/scaleway/artifact_test.go b/builder/scaleway/artifact_test.go new file mode 100644 index 000000000..8805ad686 --- /dev/null +++ b/builder/scaleway/artifact_test.go @@ -0,0 +1,33 @@ +package scaleway + +import ( + "testing" + + "github.com/hashicorp/packer/packer" +) + +func TestArtifact_Impl(t *testing.T) { + var raw interface{} + raw = &Artifact{} + if _, ok := raw.(packer.Artifact); !ok { + t.Fatalf("Artifact should 
be artifact") + } +} + +func TestArtifactId(t *testing.T) { + a := &Artifact{"packer-foobar", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "ams1:cc586e45-5156-4f71-b223-cf406b10dd1c" + + if a.Id() != expected { + t.Fatalf("artifact ID should match: %v", expected) + } +} + +func TestArtifactString(t *testing.T) { + a := &Artifact{"packer-foobar", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "A snapshot was created: 'packer-foobar' (ID: cc586e45-5156-4f71-b223-cf406b10dd1c) in region 'ams1'" + + if a.String() != expected { + t.Fatalf("artifact string should match: %v", expected) + } +} diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go new file mode 100644 index 000000000..2c92e31b6 --- /dev/null +++ b/builder/scaleway/builder.go @@ -0,0 +1,89 @@ +// The scaleway package contains a packer.Builder implementation +// that builds Scaleway images (snapshots). + +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +// The unique id for the builder +const BuilderId = "pearkes.scaleway" + +type Builder struct { + config Config + runner multistep.Runner +} + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + c, warnings, errs := NewConfig(raws...) 
+ if errs != nil { + return warnings, errs + } + b.config = *c + + return nil, nil +} + +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + client, _ := api.NewScalewayAPI(b.config.Organization, b.config.Token, b.config.UserAgent, b.config.Region) + + state := new(multistep.BasicStateBag) + state.Put("config", b.config) + state.Put("client", client) + state.Put("hook", hook) + state.Put("ui", ui) + + steps := []multistep.Step{ + &stepCreateSSHKey{ + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("scw_%s.pem", b.config.PackerBuildName), + }, + new(stepCreateServer), + new(stepServerInfo), + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig, + }, + new(common.StepProvision), + new(stepShutdown), + new(stepSnapshot), + new(stepTerminate), + } + + b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) + b.runner.Run(state) + + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + if _, ok := state.GetOk("snapshot_name"); !ok { + log.Println("Failed to find snapshot_name in state. 
Bug?") + return nil, nil + } + + artifact := &Artifact{ + snapshotName: state.Get("snapshot_name").(string), + snapshotId: state.Get("snapshot_id").(string), + regionName: state.Get("region").(string), + client: client, + } + + return artifact, nil +} + +func (b *Builder) Cancel() { + if b.runner != nil { + log.Println("Cancelling the step runner...") + b.runner.Cancel() + } +} diff --git a/builder/scaleway/builder_test.go b/builder/scaleway/builder_test.go new file mode 100644 index 000000000..ff83e1556 --- /dev/null +++ b/builder/scaleway/builder_test.go @@ -0,0 +1,237 @@ +package scaleway + +import ( + "strconv" + "testing" + + "github.com/hashicorp/packer/packer" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "api_organization": "foo", + "api_token": "bar", + "region": "ams1", + "commercial_type": "VC1S", + "ssh_username": "root", + "image": "image-uuid", + } +} + +func TestBuilder_ImplementsBuilder(t *testing.T) { + var raw interface{} + raw = &Builder{} + if _, ok := raw.(packer.Builder); !ok { + t.Fatalf("Builder should be a builder") + } +} + +func TestBuilder_Prepare_BadType(t *testing.T) { + b := &Builder{} + c := map[string]interface{}{ + "api_token": []string{}, + } + + warnings, err := b.Prepare(c) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("prepare should fail") + } +} + +func TestBuilderPrepare_InvalidKey(t *testing.T) { + var b Builder + config := testConfig() + + config["i_should_not_be_valid"] = true + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should have error") + } +} + +func TestBuilderPrepare_Region(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "region") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("should error") + } + + expected := "ams1" + + config["region"] = 
expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.Region != expected { + t.Errorf("found %s, expected %s", b.config.Region, expected) + } +} + +func TestBuilderPrepare_CommercialType(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "commercial_type") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatalf("should error") + } + + expected := "VC1S" + + config["commercial_type"] = expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.CommercialType != expected { + t.Errorf("found %s, expected %s", b.config.CommercialType, expected) + } +} + +func TestBuilderPrepare_Image(t *testing.T) { + var b Builder + config := testConfig() + + delete(config, "image") + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should error") + } + + expected := "cc586e45-5156-4f71-b223-cf406b10dd1c" + + config["image"] = expected + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.Image != expected { + t.Errorf("found %s, expected %s", b.config.Image, expected) + } +} + +func TestBuilderPrepare_SnapshotName(t *testing.T) { + var b Builder + config := testConfig() + + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.SnapshotName == "" { + t.Errorf("invalid: %s", b.config.SnapshotName) + } + + config["snapshot_name"] = "foobarbaz" + b = Builder{} 
+ warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["snapshot_name"] = "{{timestamp}}" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + _, err = strconv.ParseInt(b.config.SnapshotName, 0, 0) + if err != nil { + t.Fatalf("failed to parse int in template: %s", err) + } + +} + +func TestBuilderPrepare_ServerName(t *testing.T) { + var b Builder + config := testConfig() + + warnings, err := b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if b.config.ServerName == "" { + t.Errorf("invalid: %s", b.config.ServerName) + } + + config["server_name"] = "foobar" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["server_name"] = "foobar-{{timestamp}}" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + config["server_name"] = "foobar-{{" + b = Builder{} + warnings, err = b.Prepare(config) + if len(warnings) > 0 { + t.Fatalf("bad: %#v", warnings) + } + if err == nil { + t.Fatal("should have error") + } + +} diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go new file mode 100644 index 000000000..eb9575140 --- /dev/null +++ b/builder/scaleway/config.go @@ -0,0 +1,103 @@ +package scaleway + +import ( + "errors" + "fmt" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/common/uuid" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/packer" + 
"github.com/hashicorp/packer/template/interpolate" + "github.com/mitchellh/mapstructure" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` + + Token string `mapstructure:"api_token"` + Organization string `mapstructure:"api_organization"` + + Region string `mapstructure:"region"` + Image string `mapstructure:"image"` + CommercialType string `mapstructure:"commercial_type"` + + SnapshotName string `mapstructure:"snapshot_name"` + ServerName string `mapstructure:"server_name"` + + UserAgent string + ctx interpolate.Context +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + + var md mapstructure.Metadata + err := config.Decode(c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) + if err != nil { + return nil, nil, err + } + + c.UserAgent = "Packer - Scaleway builder" + + if c.SnapshotName == "" { + def, err := interpolate.Render("packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + + c.SnapshotName = def + } + + if c.ServerName == "" { + // Default to packer-[time-ordered-uuid] + c.ServerName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) + } + + var errs *packer.MultiError + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) 
+ } + if c.Organization == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("Scaleway Organization ID must be specified")) + } + + if c.Token == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("Scaleway Token must be specified")) + } + + if c.Region == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("region is required")) + } + + if c.CommercialType == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("commercial type is required")) + } + + if c.Image == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("image is required")) + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, nil, errs + } + + common.ScrubConfig(c, c.Token) + return c, nil, nil +} diff --git a/builder/scaleway/ssh.go b/builder/scaleway/ssh.go new file mode 100644 index 000000000..017138022 --- /dev/null +++ b/builder/scaleway/ssh.go @@ -0,0 +1,30 @@ +package scaleway + +import ( + "fmt" + "golang.org/x/crypto/ssh" + + "github.com/mitchellh/multistep" +) + +func commHost(state multistep.StateBag) (string, error) { + ipAddress := state.Get("server_ip").(string) + return ipAddress, nil +} + +func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { + config := state.Get("config").(Config) + privateKey := state.Get("privateKey").(string) + + signer, err := ssh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + return &ssh.ClientConfig{ + User: config.Comm.SSHUsername, + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(signer), + }, + }, nil +} diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go new file mode 100644 index 000000000..c961b6110 --- /dev/null +++ b/builder/scaleway/step_create_server.go @@ -0,0 +1,61 @@ +package scaleway + +import ( + "fmt" + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" + "strings" +) + +type stepCreateServer struct { + serverId 
string +} + +func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + sshPubKey := state.Get("ssh_pubkey").(string) + + ui.Say("Creating server...") + + server, err := client.PostServer(api.ScalewayServerDefinition{ + Name: c.ServerName, + Image: &c.Image, + Organization: c.Organization, + CommercialType: c.CommercialType, + Tags: []string{fmt.Sprintf("AUTHORIZED_KEY=%s", strings.TrimSpace(sshPubKey))}, + }) + + err = client.PostServerAction(server, "poweron") + + if err != nil { + err := fmt.Errorf("Error creating server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + s.serverId = server + + state.Put("server_id", server) + + return multistep.ActionContinue +} + +func (s *stepCreateServer) Cleanup(state multistep.StateBag) { + if s.serverId != "" { + return + } + + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + + ui.Say("Destroying server...") + err := client.PostServerAction(s.serverId, "terminate") + if err != nil { + ui.Error(fmt.Sprintf( + "Error destroying server. 
Please destroy it manually: %s", err)) + } +} diff --git a/builder/scaleway/step_create_ssh_key.go b/builder/scaleway/step_create_ssh_key.go new file mode 100644 index 000000000..8a3618619 --- /dev/null +++ b/builder/scaleway/step_create_ssh_key.go @@ -0,0 +1,88 @@ +package scaleway + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "log" + "os" + "runtime" + "strings" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" +) + +type stepCreateSSHKey struct { + Debug bool + DebugKeyPath string +} + +func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + + ui.Say("Creating temporary ssh key for server...") + + priv, err := rsa.GenerateKey(rand.Reader, 2014) + if err != nil { + err := fmt.Errorf("Error creating temporary SSH key: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // ASN.1 DER encoded form + priv_der := x509.MarshalPKCS1PrivateKey(priv) + priv_blk := pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: priv_der, + } + + // Set the private key in the statebag for later + state.Put("privateKey", string(pem.EncodeToMemory(&priv_blk))) + + pub, _ := ssh.NewPublicKey(&priv.PublicKey) + pub_sshformat := string(ssh.MarshalAuthorizedKey(pub)) + pub_sshformat = strings.Replace(pub_sshformat, " ", "_", -1) + + log.Printf("temporary ssh key created") + + // Remember some state for the future + state.Put("ssh_pubkey", string(pub_sshformat)) + + // If we're in debug mode, output the private key to the working directory. 
+ if s.Debug { + ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) + f, err := os.Create(s.DebugKeyPath) + if err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + defer f.Close() + + // Write the key out + if _, err := f.Write(pem.EncodeToMemory(&priv_blk)); err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + + // Chmod it so that it is SSH ready + if runtime.GOOS != "windows" { + if err := f.Chmod(0600); err != nil { + state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err)) + return multistep.ActionHalt + } + } + } + + return multistep.ActionContinue +} + +func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) { + // SSH key is passed via tag. Nothing to do here. + return +} diff --git a/builder/scaleway/step_server_info.go b/builder/scaleway/step_server_info.go new file mode 100644 index 000000000..38502fe4d --- /dev/null +++ b/builder/scaleway/step_server_info.go @@ -0,0 +1,44 @@ +package scaleway + +import ( + "fmt" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepServerInfo struct{} + +func (s *stepServerInfo) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + serverID := state.Get("server_id").(string) + + ui.Say("Waiting for server to become active...") + + _, err := api.WaitForServerState(client, serverID, "running") + if err != nil { + err := fmt.Errorf("Error waiting for server to become booted: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + server, err := client.GetServer(serverID) + if err != nil { + err := fmt.Errorf("Error retrieving server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + 
state.Put("server_ip", server.PublicAddress.IP) + state.Put("root_volume_id", server.Volumes["0"].Identifier) + + return multistep.ActionContinue +} + +func (s *stepServerInfo) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_shutdown.go b/builder/scaleway/step_shutdown.go new file mode 100644 index 000000000..8d246974d --- /dev/null +++ b/builder/scaleway/step_shutdown.go @@ -0,0 +1,43 @@ +package scaleway + +import ( + "fmt" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepShutdown struct{} + +func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + serverId := state.Get("server_id").(string) + + ui.Say("Shutting down server...") + + err := client.PostServerAction(serverId, "poweroff") + + if err != nil { + err := fmt.Errorf("Error stopping server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + _, err = api.WaitForServerState(client, serverId, "stopped") + + if err != nil { + err := fmt.Errorf("Error shutting down server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *stepShutdown) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_snapshot.go b/builder/scaleway/step_snapshot.go new file mode 100644 index 000000000..26bfa6934 --- /dev/null +++ b/builder/scaleway/step_snapshot.go @@ -0,0 +1,48 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepSnapshot struct{} + +func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := 
state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + volumeId := state.Get("root_volume_id").(string) + + ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName)) + snapshot, err := client.PostSnapshot(volumeId, c.SnapshotName) + if err != nil { + err := fmt.Errorf("Error creating snapshot: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Looking up snapshot ID for snapshot: %s", c.SnapshotName) + _, err = client.GetSnapshot(snapshot) + if err != nil { + err := fmt.Errorf("Error looking up snapshot ID: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Snapshot ID: %s", snapshot) + state.Put("snapshot_id", snapshot) + state.Put("snapshot_name", c.SnapshotName) + state.Put("region", c.Region) + + return multistep.ActionContinue +} + +func (s *stepSnapshot) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_terminate.go b/builder/scaleway/step_terminate.go new file mode 100644 index 000000000..0951b6a94 --- /dev/null +++ b/builder/scaleway/step_terminate.go @@ -0,0 +1,34 @@ +package scaleway + +import ( + "fmt" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepTerminate struct{} + +func (s *stepTerminate) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + serverId := state.Get("server_id").(string) + + ui.Say("Terminating server...") + + err := client.DeleteServerForce(serverId) + + if err != nil { + err := fmt.Errorf("Error terminating server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *stepTerminate) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/command/plugin.go b/command/plugin.go index 
55cfacc60..60651332d 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -39,6 +39,7 @@ import ( parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" qemubuilder "github.com/hashicorp/packer/builder/qemu" + scalewaybuilder "github.com/hashicorp/packer/builder/scaleway" tritonbuilder "github.com/hashicorp/packer/builder/triton" virtualboxisobuilder "github.com/hashicorp/packer/builder/virtualbox/iso" virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf" @@ -108,6 +109,7 @@ var Builders = map[string]packer.Builder{ "parallels-pvm": new(parallelspvmbuilder.Builder), "profitbricks": new(profitbricksbuilder.Builder), "qemu": new(qemubuilder.Builder), + "scaleway": new(scalewaybuilder.Builder), "triton": new(tritonbuilder.Builder), "virtualbox-iso": new(virtualboxisobuilder.Builder), "virtualbox-ovf": new(virtualboxovfbuilder.Builder), diff --git a/post-processor/vagrant-cloud/post-processor.go b/post-processor/vagrant-cloud/post-processor.go index f6c7f19d6..273d2fdb8 100644 --- a/post-processor/vagrant-cloud/post-processor.go +++ b/post-processor/vagrant-cloud/post-processor.go @@ -189,6 +189,8 @@ func providerFromBuilderName(name string) string { switch name { case "aws": return "aws" + case "scaleway": + return "scaleway" case "digitalocean": return "digitalocean" case "virtualbox": diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 310270809..18357923f 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -26,6 +26,7 @@ var builtins = map[string]string{ "mitchellh.vmware-esx": "vmware", "pearkes.digitalocean": "digitalocean", "packer.googlecompute": "google", + "pearkes.scaleway": "scaleway", "packer.parallels": "parallels", "MSOpenTech.hyperv": "hyperv", "transcend.qemu": "libvirt", @@ -221,6 +222,8 @@ func providerForName(name string) Provider { 
switch name { case "aws": return new(AWSProvider) + case "scaleway": + return new(ScalewayProvider) case "digitalocean": return new(DigitalOceanProvider) case "virtualbox": diff --git a/post-processor/vagrant/scaleway.go b/post-processor/vagrant/scaleway.go new file mode 100644 index 000000000..879f2b59e --- /dev/null +++ b/post-processor/vagrant/scaleway.go @@ -0,0 +1,52 @@ +package vagrant + +import ( + "bytes" + "fmt" + "github.com/hashicorp/packer/packer" + "strings" + "text/template" +) + +type scalewayVagrantfileTemplate struct { + Image string "" + Region string "" +} + +type ScalewayProvider struct{} + +func (p *ScalewayProvider) KeepInputArtifact() bool { + return true +} + +func (p *ScalewayProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { + // Create the metadata + metadata = map[string]interface{}{"provider": "scaleway"} + + // Determine the image and region... + tplData := &scalewayVagrantfileTemplate{} + + parts := strings.Split(artifact.Id(), ":") + if len(parts) != 2 { + err = fmt.Errorf("Poorly formatted artifact ID: %s", artifact.Id()) + return + } + tplData.Region = parts[0] + tplData.Image = parts[1] + + // Build up the Vagrantfile + var contents bytes.Buffer + t := template.Must(template.New("vf").Parse(defaultScalewayVagrantfile)) + err = t.Execute(&contents, tplData) + vagrantfile = contents.String() + return +} + +var defaultScalewayVagrantfile = ` +Vagrant.configure("2") do |config| + config.vm.provider :scaleway do |scaleway| + scaleway.image = "{{ .Image }}" + scaleway.region = "{{ .Region }}" + end +end +` diff --git a/vendor/vendor.json b/vendor/vendor.json index 1ad929935..97a1a27f8 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -232,6 +232,12 @@ "path": "github.com/NaverCloudPlatform/ncloud-sdk-go/sdk", "revision": "c2e73f942591b0f033a3c6df00f44badb2347c38", "revisionTime": "2018-01-10T05:50:12Z" + }, + { + "checksumSHA1": 
"8dVO3L8yAdQ17X3lAhIziyF3OFk=", + "path": "github.com/Sirupsen/logrus", + "revision": "10f801ebc38b33738c9d17d50860f484a0988ff5", + "revisionTime": "2017-03-17T14:32:14Z" }, { "checksumSHA1": "HttiPj314X1a0i2Jen1p6lRH/vE=", @@ -589,6 +595,12 @@ "path": "github.com/biogo/hts/bgzf", "revision": "50da7d4131a3b5c9d063932461cab4d1fafb20b0" }, + { + "checksumSHA1": "bFj0ceSRvaFFCfmS4el1PjWhcgw=", + "path": "github.com/creack/goselect", + "revision": "1bd5ca702c6154bccc56ecd598932ee8b295cab2", + "revisionTime": "2016-07-14T17:28:59Z" + }, { "checksumSHA1": "Lf3uUXTkKK5DJ37BxQvxO1Fq+K8=", "comment": "v1.0.0-3-g6d21280", @@ -638,6 +650,24 @@ "revision": "4c04abe183f449bd9ede285f0e5c7ee575d0dbe4", "revisionTime": "2017-04-07T15:15:42Z" }, + { + "checksumSHA1": "1n5MBJthemxmfqU2gN3qLCd8s04=", + "path": "github.com/docker/docker/pkg/namesgenerator", + "revision": "fa3e2d5ab9b577cecd24201902bbe72b3f1b851c", + "revisionTime": "2017-04-06T12:40:27Z" + }, + { + "checksumSHA1": "lThih54jzz9A4zHKEFb9SIV3Ed0=", + "path": "github.com/docker/docker/pkg/random", + "revision": "fa3e2d5ab9b577cecd24201902bbe72b3f1b851c", + "revisionTime": "2017-04-06T12:40:27Z" + }, + { + "checksumSHA1": "rhLUtXvcmouYuBwOq9X/nYKzvNg=", + "path": "github.com/dustin/go-humanize", + "revision": "259d2a102b871d17f30e3cd9881a642961a1e486", + "revisionTime": "2017-02-28T07:34:54Z" + }, { "checksumSHA1": "GCskdwYAPW2S34918Z5CgNMJ2Wc=", "path": "github.com/dylanmei/iso8601", @@ -779,6 +809,12 @@ "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", "revisionTime": "2017-06-23T01:44:30Z" }, + { + "checksumSHA1": "xSmii71kfQASGNG2C8ttmHx9KTE=", + "path": "github.com/gorilla/websocket", + "revision": "a91eba7f97777409bc2c443f5534d41dd20c5720", + "revisionTime": "2017-03-19T17:27:27Z" + }, { "checksumSHA1": "izBSRxLAHN+a/XpAku0in05UzlY=", "comment": "20141209094003-92-g95fa852", @@ -1032,6 +1068,18 @@ "path": "github.com/mitchellh/reflectwalk", "revision": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6" }, + { + 
"checksumSHA1": "KhOVzKefFYORHdIVe+/gNAHB23A=", + "path": "github.com/moul/anonuuid", + "revision": "609b752a95effbbef26d134ac18ed6f57e01b98e", + "revisionTime": "2016-02-22T16:21:17Z" + }, + { + "checksumSHA1": "WcXDSYIAP73RAvy22iD57nE/peI=", + "path": "github.com/moul/gotty-client", + "revision": "99224eea3278d662fce9124bb2bf6c2bb39f5160", + "revisionTime": "2017-02-05T09:54:39Z" + }, { "checksumSHA1": "gcLub3oB+u4QrOJZcYmk/y2AP4k=", "path": "github.com/nu7hatch/gouuid", @@ -1117,6 +1165,12 @@ "revision": "7bdb11aecb0e457ea23c86898c6b49bfc0eb4bb1", "revisionTime": "2017-08-01T13:52:49Z" }, + { + "checksumSHA1": "DF3jZEw4lCq/SEaC7DIl/R+7S70=", + "path": "github.com/renstrom/fuzzysearch/fuzzy", + "revision": "2d205ac6ec17a839a94bdbfd16d2fa6c6dada2e0", + "revisionTime": "2016-03-31T20:48:55Z" + }, { "checksumSHA1": "zmC8/3V4ls53DJlNTKDZwPSC/dA=", "path": "github.com/satori/go.uuid", @@ -1129,6 +1183,24 @@ "revision": "5bf94b69c6b68ee1b541973bb8e1144db23a194b", "revisionTime": "2017-03-21T23:07:31Z" }, + { + "checksumSHA1": "FFhSGe3Y3J1laR/6rwSS7U2esrk=", + "path": "github.com/scaleway/scaleway-cli/pkg/api", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, + { + "checksumSHA1": "kveaAmNlnvmIIuEkFcMlB+N7TqY=", + "path": "github.com/scaleway/scaleway-cli/pkg/sshcommand", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, + { + "checksumSHA1": "xM3G5ct9YYYnVIL3XMRrcf41xVw=", + "path": "github.com/scaleway/scaleway-cli/pkg/utils", + "revision": "e50cb485747a4f25a361c90ef3ba05be79944c56", + "revisionTime": "2017-04-03T16:01:47Z" + }, { "checksumSHA1": "iydUphwYqZRq3WhstEdGsbvBAKs=", "comment": "v1.1.4-4-g976c720", @@ -1281,6 +1353,12 @@ "revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8", "revisionTime": "2017-02-08T20:51:15Z" }, + { + "checksumSHA1": "xiderUuvye8Kpn7yX3niiJg32bE=", + "path": "golang.org/x/crypto/ssh/terminal", + "revision": 
"c2303dcbe84172e0c0da4c9f083eeca54c06f298", + "revisionTime": "2017-01-17T19:20:27Z" + }, { "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", "path": "golang.org/x/net/context", @@ -1342,6 +1420,12 @@ "path": "golang.org/x/oauth2/jwt", "revision": "8a57ed94ffd43444c0879fe75701732a38afc985" }, + { + "checksumSHA1": "S0DP7Pn7sZUmXc55IzZnNvERu6s=", + "path": "golang.org/x/sync/errgroup", + "revision": "5a06fca2c336a4b2b2fcb45702e8c47621b2aa2c", + "revisionTime": "2017-03-17T17:13:11Z" + }, { "checksumSHA1": "NzQ3QYllWwK+3GZliu11jMU6xwo=", "path": "golang.org/x/sys/unix", From 5a2f37896ebd2ae4db41ceb4b1ad11809268534a Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Thu, 6 Apr 2017 16:08:54 +0200 Subject: [PATCH 211/251] Add documentation --- website/source/docs/builders/scaleway.html.md | 77 +++++++++++++++++++ website/source/layouts/docs.erb | 3 + 2 files changed, 80 insertions(+) create mode 100644 website/source/docs/builders/scaleway.html.md diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md new file mode 100644 index 000000000..01852c70c --- /dev/null +++ b/website/source/docs/builders/scaleway.html.md @@ -0,0 +1,77 @@ +--- +layout: docs +sidebar_current: docs-builders-scaleway +page_title: Scaleway - Builders +description: |- + The Scaleway Packer builder is able to create new snapshots for use with + Scaleway BareMetal and Virtual cloud server. The builder takes a source image, runs any provisioning + necessary on the image after launching it, then snapshots it into a reusable + image. This reusable image can then be used as the foundation of new servers + that are launched within Scaleway. +--- + + +# Scaleway Builder + +Type: `scaleway` + +The `scaleway` Packer builder is able to create new snapshots for use with +[Scaleway](https://www.scaleway.com). The builder takes a source image, +runs any provisioning necessary on the image after launching it, then snapshots +it into a reusable image. 
This reusable image can then be used as the foundation +of new servers that are launched within Scaleway. + +The builder does *not* manage snapshots. Once it creates an image, it is up to you +to use it or delete it. + +## Configuration Reference + +There are many configuration options available for the builder. They are +segmented below into two categories: required and optional parameters. Within +each category, the available configuration keys are alphabetized. + +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) can be configured for this +builder. + +### Required: + +- `api_organization` (string) - The organization ID to use to access your account. + +- `api_token` (string) - The organization TOKEN to use to access your account. + +- `image` (string) - The UUID of the base image to use. This is the + image that will be used to launch a new server and provision it. See + [https://api-marketplace.scaleway.com/images](https://api-marketplace.scaleway.com/images) to + get the complete list of the accepted image UUID. + +- `region` (string) - The name of the region to launch the + server in (`par1` or `ams1`). Consequently, this is the region where the snapshot will + be available. + +- `commercial_type` (string) - The name of the server commercial type: `C1`, `C2S`, `C2M`, + `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`, `X64-30GB`, `X64-60GB`, `X64-120GB` + +### Optional: + +- `server_name` (string) - The name assigned to the server. + +- `snapshot_name` (string) - The name of the resulting snapshot that will + appear in your account. + +## Basic Example + +Here is a basic example. 
It is completely valid as soon as you enter your own +access tokens: + +```json +{ + "type": "scaleway", + "api_organization": "YOUR ORGANIZATION KEY", + "api_token": "YOUR TOKEN", + "image": "f01f8a48-c026-48ac-9771-a70eaac0890e", + "region": "par1", + "commercial_type": "X64-2GB", + "ssh_username": "root" +} +``` diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 7f12d326c..271de572f 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -90,6 +90,9 @@ + > + Scaleway + > CloudStack From e46108298ccf90d155630f77d74d328142cebd60 Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Thu, 6 Apr 2017 16:31:35 +0200 Subject: [PATCH 212/251] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5249f515e..1c30fd877 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ comes out of the box with support for the following platforms: * Parallels * ProfitBricks * QEMU. Both KVM and Xen images. 
+* Scaleway * Triton (Joyent Public Cloud) * VMware * VirtualBox From 9b611af7e6dd6e32b823751b47bbc8ea5593e769 Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Thu, 6 Apr 2017 16:37:06 +0200 Subject: [PATCH 213/251] Allow token and organization id to be passed via env vars --- builder/scaleway/config.go | 9 +++++++++ website/source/docs/builders/scaleway.html.md | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go index eb9575140..040b39891 100644 --- a/builder/scaleway/config.go +++ b/builder/scaleway/config.go @@ -3,6 +3,7 @@ package scaleway import ( "errors" "fmt" + "os" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/uuid" @@ -51,6 +52,14 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.UserAgent = "Packer - Scaleway builder" + if c.Organization == "" { + c.Organization = os.Getenv("SCALEWAY_API_ORGANIZATION") + } + + if c.Token == "" { + c.Token = os.Getenv("SCALEWAY_API_TOKEN") + } + if c.SnapshotName == "" { def, err := interpolate.Render("packer-{{timestamp}}", nil) if err != nil { diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index 01852c70c..1df9dd6ec 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -37,8 +37,12 @@ builder. ### Required: - `api_organization` (string) - The organization ID to use to access your account. + It can also be specified via + environment variable `SCALEWAY_API_ORGANIZATION`. - `api_token` (string) - The organization TOKEN to use to access your account. + It can also be specified via + environment variable `SCALEWAY_API_TOKEN`. - `image` (string) - The UUID of the base image to use. This is the image that will be used to launch a new server and provision it. 
See From 1fb13cc23eeebf7e3c57f47657d663dc6a13e21c Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Tue, 11 Apr 2017 12:19:28 +0200 Subject: [PATCH 214/251] Add image creation from snapshot Rename organization_id / access_key Update test / doc --- builder/scaleway/artifact.go | 25 ++++++--- builder/scaleway/artifact_test.go | 8 +-- builder/scaleway/builder.go | 5 +- builder/scaleway/builder_test.go | 12 ++--- builder/scaleway/config.go | 16 ++++-- builder/scaleway/step_create_image.go | 53 +++++++++++++++++++ builder/scaleway/step_create_server.go | 11 ++-- builder/scaleway/step_shutdown.go | 6 +-- builder/scaleway/step_snapshot.go | 13 +---- builder/scaleway/step_terminate.go | 4 +- website/source/docs/builders/scaleway.html.md | 15 ++++-- 11 files changed, 122 insertions(+), 46 deletions(-) create mode 100644 builder/scaleway/step_create_image.go diff --git a/builder/scaleway/artifact.go b/builder/scaleway/artifact.go index fe1efb96f..b5aea88da 100644 --- a/builder/scaleway/artifact.go +++ b/builder/scaleway/artifact.go @@ -8,11 +8,17 @@ import ( ) type Artifact struct { + // The name of the image + imageName string + + // The ID of the image + imageID string + // The name of the snapshot snapshotName string // The ID of the snapshot - snapshotId string + snapshotID string // The name of the region regionName string @@ -31,11 +37,12 @@ func (*Artifact) Files() []string { } func (a *Artifact) Id() string { - return fmt.Sprintf("%s:%s", a.regionName, a.snapshotId) + return fmt.Sprintf("%s:%s", a.regionName, a.imageID) } func (a *Artifact) String() string { - return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in region '%v'", a.snapshotName, a.snapshotId, a.regionName) + return fmt.Sprintf("An image was created: '%v' (ID: %v) in region '%v' based on snapshot '%v' (ID: %v)", + a.imageName, a.imageID, a.regionName, a.snapshotName, a.snapshotID) } func (a *Artifact) State(name string) interface{} { @@ -43,7 +50,13 @@ func (a *Artifact) State(name string) 
interface{} { } func (a *Artifact) Destroy() error { - log.Printf("Destroying image: %s (%s)", a.snapshotId, a.snapshotName) - err := a.client.DeleteSnapshot(a.snapshotId) - return err + log.Printf("Destroying image: %s (%s)", a.imageID, a.imageName) + if err := a.client.DeleteImage(a.imageID); err != nil { + return err + } + log.Printf("Destroying snapshot: %s (%s)", a.snapshotID, a.snapshotName) + if err := a.client.DeleteSnapshot(a.snapshotID); err != nil { + return err + } + return nil } diff --git a/builder/scaleway/artifact_test.go b/builder/scaleway/artifact_test.go index 8805ad686..8f158ef5f 100644 --- a/builder/scaleway/artifact_test.go +++ b/builder/scaleway/artifact_test.go @@ -15,8 +15,8 @@ func TestArtifact_Impl(t *testing.T) { } func TestArtifactId(t *testing.T) { - a := &Artifact{"packer-foobar", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} - expected := "ams1:cc586e45-5156-4f71-b223-cf406b10dd1c" + a := &Artifact{"packer-foobar-image", "cc586e45-5156-4f71-b223-cf406b10dd1d", "packer-foobar-snapshot", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "ams1:cc586e45-5156-4f71-b223-cf406b10dd1d" if a.Id() != expected { t.Fatalf("artifact ID should match: %v", expected) @@ -24,8 +24,8 @@ func TestArtifactId(t *testing.T) { } func TestArtifactString(t *testing.T) { - a := &Artifact{"packer-foobar", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} - expected := "A snapshot was created: 'packer-foobar' (ID: cc586e45-5156-4f71-b223-cf406b10dd1c) in region 'ams1'" + a := &Artifact{"packer-foobar-image", "cc586e45-5156-4f71-b223-cf406b10dd1d", "packer-foobar-snapshot", "cc586e45-5156-4f71-b223-cf406b10dd1c", "ams1", nil} + expected := "An image was created: 'packer-foobar-image' (ID: cc586e45-5156-4f71-b223-cf406b10dd1d) in region 'ams1' based on snapshot 'packer-foobar-snapshot' (ID: cc586e45-5156-4f71-b223-cf406b10dd1c)" if a.String() != expected { t.Fatalf("artifact string should match: %v", expected) diff --git 
a/builder/scaleway/builder.go b/builder/scaleway/builder.go index 2c92e31b6..e6265395a 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -56,6 +56,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(common.StepProvision), new(stepShutdown), new(stepSnapshot), + new(stepImage), new(stepTerminate), } @@ -72,8 +73,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe } artifact := &Artifact{ + imageName: state.Get("image_name").(string), + imageID: state.Get("image_id").(string), snapshotName: state.Get("snapshot_name").(string), - snapshotId: state.Get("snapshot_id").(string), + snapshotID: state.Get("snapshot_id").(string), regionName: state.Get("region").(string), client: client, } diff --git a/builder/scaleway/builder_test.go b/builder/scaleway/builder_test.go index ff83e1556..230c9c0c6 100644 --- a/builder/scaleway/builder_test.go +++ b/builder/scaleway/builder_test.go @@ -9,12 +9,12 @@ import ( func testConfig() map[string]interface{} { return map[string]interface{}{ - "api_organization": "foo", - "api_token": "bar", - "region": "ams1", - "commercial_type": "VC1S", - "ssh_username": "root", - "image": "image-uuid", + "api_access_key": "foo", + "api_token": "bar", + "region": "ams1", + "commercial_type": "VC1S", + "ssh_username": "root", + "image": "image-uuid", } } diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go index 040b39891..f965e1ce0 100644 --- a/builder/scaleway/config.go +++ b/builder/scaleway/config.go @@ -19,13 +19,14 @@ type Config struct { Comm communicator.Config `mapstructure:",squash"` Token string `mapstructure:"api_token"` - Organization string `mapstructure:"api_organization"` + Organization string `mapstructure:"api_access_key"` Region string `mapstructure:"region"` Image string `mapstructure:"image"` CommercialType string `mapstructure:"commercial_type"` SnapshotName string `mapstructure:"snapshot_name"` + ImageName string 
`mapstructure:"image_name"` ServerName string `mapstructure:"server_name"` UserAgent string @@ -53,7 +54,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.UserAgent = "Packer - Scaleway builder" if c.Organization == "" { - c.Organization = os.Getenv("SCALEWAY_API_ORGANIZATION") + c.Organization = os.Getenv("SCALEWAY_API_ACCESS_KEY") } if c.Token == "" { @@ -61,7 +62,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } if c.SnapshotName == "" { - def, err := interpolate.Render("packer-{{timestamp}}", nil) + def, err := interpolate.Render("snapshot-packer-{{timestamp}}", nil) if err != nil { panic(err) } @@ -69,6 +70,15 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.SnapshotName = def } + if c.ImageName == "" { + def, err := interpolate.Render("image-packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + + c.ImageName = def + } + if c.ServerName == "" { // Default to packer-[time-ordered-uuid] c.ServerName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) diff --git a/builder/scaleway/step_create_image.go b/builder/scaleway/step_create_image.go new file mode 100644 index 000000000..313f2e421 --- /dev/null +++ b/builder/scaleway/step_create_image.go @@ -0,0 +1,53 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/packer/packer" + "github.com/mitchellh/multistep" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +type stepImage struct{} + +func (s *stepImage) Run(state multistep.StateBag) multistep.StepAction { + client := state.Get("client").(*api.ScalewayAPI) + ui := state.Get("ui").(packer.Ui) + c := state.Get("config").(Config) + snapshotID := state.Get("snapshot_id").(string) + bootscriptID := "" + + ui.Say(fmt.Sprintf("Creating image: %v", c.ImageName)) + + image, err := client.GetImage(c.Image) + if err != nil { + err := fmt.Errorf("Error getting initial image info: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if 
image.DefaultBootscript != nil { + bootscriptID = image.DefaultBootscript.Identifier + } + + imageID, err := client.PostImage(snapshotID, c.ImageName, bootscriptID, image.Arch) + if err != nil { + err := fmt.Errorf("Error creating image: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Image ID: %s", imageID) + state.Put("image_id", imageID) + state.Put("image_name", c.ImageName) + state.Put("region", c.Region) + + return multistep.ActionContinue +} + +func (s *stepImage) Cleanup(state multistep.StateBag) { + // no cleanup +} diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go index c961b6110..e1e8b88c6 100644 --- a/builder/scaleway/step_create_server.go +++ b/builder/scaleway/step_create_server.go @@ -2,14 +2,15 @@ package scaleway import ( "fmt" + "strings" + "github.com/hashicorp/packer/packer" "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" - "strings" ) type stepCreateServer struct { - serverId string + serverID string } func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { @@ -37,7 +38,7 @@ func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - s.serverId = server + s.serverID = server state.Put("server_id", server) @@ -45,7 +46,7 @@ func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { } func (s *stepCreateServer) Cleanup(state multistep.StateBag) { - if s.serverId != "" { + if s.serverID != "" { return } @@ -53,7 +54,7 @@ func (s *stepCreateServer) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Destroying server...") - err := client.PostServerAction(s.serverId, "terminate") + err := client.PostServerAction(s.serverID, "terminate") if err != nil { ui.Error(fmt.Sprintf( "Error destroying server. 
Please destroy it manually: %s", err)) diff --git a/builder/scaleway/step_shutdown.go b/builder/scaleway/step_shutdown.go index 8d246974d..b8e45be51 100644 --- a/builder/scaleway/step_shutdown.go +++ b/builder/scaleway/step_shutdown.go @@ -13,11 +13,11 @@ type stepShutdown struct{} func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) - serverId := state.Get("server_id").(string) + serverID := state.Get("server_id").(string) ui.Say("Shutting down server...") - err := client.PostServerAction(serverId, "poweroff") + err := client.PostServerAction(serverID, "poweroff") if err != nil { err := fmt.Errorf("Error stopping server: %s", err) @@ -26,7 +26,7 @@ func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - _, err = api.WaitForServerState(client, serverId, "stopped") + _, err = api.WaitForServerState(client, serverID, "stopped") if err != nil { err := fmt.Errorf("Error shutting down server: %s", err) diff --git a/builder/scaleway/step_snapshot.go b/builder/scaleway/step_snapshot.go index 26bfa6934..20301a444 100644 --- a/builder/scaleway/step_snapshot.go +++ b/builder/scaleway/step_snapshot.go @@ -15,10 +15,10 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) - volumeId := state.Get("root_volume_id").(string) + volumeID := state.Get("root_volume_id").(string) ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName)) - snapshot, err := client.PostSnapshot(volumeId, c.SnapshotName) + snapshot, err := client.PostSnapshot(volumeID, c.SnapshotName) if err != nil { err := fmt.Errorf("Error creating snapshot: %s", err) state.Put("error", err) @@ -26,15 +26,6 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - 
log.Printf("Looking up snapshot ID for snapshot: %s", c.SnapshotName) - _, err = client.GetSnapshot(snapshot) - if err != nil { - err := fmt.Errorf("Error looking up snapshot ID: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - log.Printf("Snapshot ID: %s", snapshot) state.Put("snapshot_id", snapshot) state.Put("snapshot_name", c.SnapshotName) diff --git a/builder/scaleway/step_terminate.go b/builder/scaleway/step_terminate.go index 0951b6a94..154750931 100644 --- a/builder/scaleway/step_terminate.go +++ b/builder/scaleway/step_terminate.go @@ -13,11 +13,11 @@ type stepTerminate struct{} func (s *stepTerminate) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) - serverId := state.Get("server_id").(string) + serverID := state.Get("server_id").(string) ui.Say("Terminating server...") - err := client.DeleteServerForce(serverId) + err := client.DeleteServerForce(serverID) if err != nil { err := fmt.Errorf("Error terminating server: %s", err) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index 1df9dd6ec..4e0a513fc 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -3,7 +3,7 @@ layout: docs sidebar_current: docs-builders-scaleway page_title: Scaleway - Builders description: |- - The Scaleway Packer builder is able to create new snapshots for use with + The Scaleway Packer builder is able to create new images for use with Scaleway BareMetal and Virtual cloud server. The builder takes a source image, runs any provisioning necessary on the image after launching it, then snapshots it into a reusable image. 
This reusable image can then be used as the foundation of new servers @@ -15,7 +15,7 @@ description: |- Type: `scaleway` -The `scaleway` Packer builder is able to create new snapshots for use with +The `scaleway` Packer builder is able to create new images for use with [Scaleway](https://www.scaleway.com). The builder takes a source image, runs any provisioning necessary on the image after launching it, then snapshots it into a reusable image. This reusable image can then be used as the foundation @@ -36,13 +36,15 @@ builder. ### Required: -- `api_organization` (string) - The organization ID to use to access your account. +- `api_access_key` (string) - The api_access_key to use to access your account. It can also be specified via - environment variable `SCALEWAY_API_ORGANIZATION`. + environment variable `SCALEWAY_API_ACCESS_KEY`. + Your access key is available in the ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the control panel. - `api_token` (string) - The organization TOKEN to use to access your account. It can also be specified via environment variable `SCALEWAY_API_TOKEN`. + Your tokens are available in the ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the control panel. - `image` (string) - The UUID of the base image to use. This is the image that will be used to launch a new server and provision it. See @@ -60,6 +62,9 @@ builder. - `server_name` (string) - The name assigned to the server. +- `image_name` (string) - The name of the resulting image that will + appear in your account. + - `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. 
@@ -71,7 +76,7 @@ access tokens: ```json { "type": "scaleway", - "api_organization": "YOUR ORGANIZATION KEY", + "api_access_key": "YOUR API ACCESS KEY", "api_token": "YOUR TOKEN", "image": "f01f8a48-c026-48ac-9771-a70eaac0890e", "region": "par1", From 2de93c5ae699838cccb9720f912fc55a2a3e8c9b Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Wed, 19 Apr 2017 11:10:52 +0200 Subject: [PATCH 215/251] Add existing SSH key support Update documentation --- builder/scaleway/builder.go | 5 +++-- builder/scaleway/step_create_server.go | 7 ++++++- builder/scaleway/step_create_ssh_key.go | 21 +++++++++++++++++-- website/source/docs/builders/scaleway.html.md | 8 ++++++- 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index e6265395a..2277808e0 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -43,8 +43,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe steps := []multistep.Step{ &stepCreateSSHKey{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("scw_%s.pem", b.config.PackerBuildName), + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("scw_%s.pem", b.config.PackerBuildName), + PrivateKeyFile: b.config.Comm.SSHPrivateKey, }, new(stepCreateServer), new(stepServerInfo), diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go index e1e8b88c6..3cf46e1ed 100644 --- a/builder/scaleway/step_create_server.go +++ b/builder/scaleway/step_create_server.go @@ -18,15 +18,20 @@ func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) sshPubKey := state.Get("ssh_pubkey").(string) + tags := []string{} ui.Say("Creating server...") + if sshPubKey != "" { + tags = []string{fmt.Sprintf("AUTHORIZED_KEY=%s", strings.TrimSpace(sshPubKey))} + } + server, err := client.PostServer(api.ScalewayServerDefinition{ Name: 
c.ServerName, Image: &c.Image, Organization: c.Organization, CommercialType: c.CommercialType, - Tags: []string{fmt.Sprintf("AUTHORIZED_KEY=%s", strings.TrimSpace(sshPubKey))}, + Tags: tags, }) err = client.PostServerAction(server, "poweron") diff --git a/builder/scaleway/step_create_ssh_key.go b/builder/scaleway/step_create_ssh_key.go index 8a3618619..dab8f710b 100644 --- a/builder/scaleway/step_create_ssh_key.go +++ b/builder/scaleway/step_create_ssh_key.go @@ -6,6 +6,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "io/ioutil" "log" "os" "runtime" @@ -17,13 +18,29 @@ import ( ) type stepCreateSSHKey struct { - Debug bool - DebugKeyPath string + Debug bool + DebugKeyPath string + PrivateKeyFile string } func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) + if s.PrivateKeyFile != "" { + ui.Say("Using existing SSH private key") + privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) + if err != nil { + state.Put("error", fmt.Errorf( + "Error loading configured private key file: %s", err)) + return multistep.ActionHalt + } + + state.Put("privateKey", string(privateKeyBytes)) + state.Put("ssh_pubkey", "") + + return multistep.ActionContinue + } + ui.Say("Creating temporary ssh key for server...") priv, err := rsa.GenerateKey(rand.Reader, 2014) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index 4e0a513fc..fa1edd827 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -68,6 +68,8 @@ builder. - `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. +- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authentiate with SSH. + ## Basic Example Here is a basic example. 
It is completely valid as soon as you enter your own @@ -81,6 +83,10 @@ access tokens: "image": "f01f8a48-c026-48ac-9771-a70eaac0890e", "region": "par1", "commercial_type": "X64-2GB", - "ssh_username": "root" + "ssh_username": "root", + "ssh_private_key_file": "~/.ssh/id_rsa", } ``` + +When you do not specified the `ssh_private_key_file`, a temporarily SSH keypair is generated to connect the server. +This key will only allows the `root` user to connect the server. \ No newline at end of file From edf9dd15174b08c13c98cafbf1085431521c632d Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Wed, 19 Apr 2017 11:37:57 +0200 Subject: [PATCH 216/251] Fix doc --- website/source/docs/builders/scaleway.html.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index fa1edd827..88f14a155 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -68,8 +68,6 @@ builder. - `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. -- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authentiate with SSH. - ## Basic Example Here is a basic example. 
It is completely valid as soon as you enter your own From 09805911b44b6eabcddd72e0d8adfea9844c086f Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Tue, 11 Jul 2017 08:43:04 +0200 Subject: [PATCH 217/251] Fix builder unique id Add new ARM64 commercial types DOC - Add default value for optional settings DOC - Fix typo --- builder/scaleway/builder.go | 2 +- website/source/docs/builders/scaleway.html.md | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index 2277808e0..14fee93a0 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -15,7 +15,7 @@ import ( ) // The unique id for the builder -const BuilderId = "pearkes.scaleway" +const BuilderId = "hashicorp.scaleway" type Builder struct { config Config diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index 88f14a155..e045f27bd 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -56,17 +56,17 @@ builder. be available. - `commercial_type` (string) - The name of the server commercial type: `C1`, `C2S`, `C2M`, - `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`, `X64-30GB`, `X64-60GB`, `X64-120GB` + `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`, `X64-30GB`, `X64-60GB`, `X64-120GB`, `ARM64-2GB`, `ARM64-4GB`, `ARM64-8GB`, `ARM64-16GB`, `ARM64-32GB`, `ARM64-64GB`, `ARM64-128GB` ### Optional: -- `server_name` (string) - The name assigned to the server. +- `server_name` (string) - The name assigned to the server. Default `packer-UUID` - `image_name` (string) - The name of the resulting image that will - appear in your account. + appear in your account. Default `packer-TIMESTAMP` - `snapshot_name` (string) - The name of the resulting snapshot that will - appear in your account. + appear in your account. 
Default `packer-TIMESTAMP ## Basic Example @@ -78,13 +78,14 @@ access tokens: "type": "scaleway", "api_access_key": "YOUR API ACCESS KEY", "api_token": "YOUR TOKEN", - "image": "f01f8a48-c026-48ac-9771-a70eaac0890e", + "image": "UUID OF THE BASE IMAGE", "region": "par1", "commercial_type": "X64-2GB", "ssh_username": "root", "ssh_private_key_file": "~/.ssh/id_rsa", + Extra, } ``` When you do not specified the `ssh_private_key_file`, a temporarily SSH keypair is generated to connect the server. -This key will only allows the `root` user to connect the server. \ No newline at end of file +This key will only allow the `root` user to connect the server. From ae18995ca1e7a4c038b2f26c012359ff15cfac63 Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Tue, 11 Jul 2017 08:46:55 +0200 Subject: [PATCH 218/251] Fix builder id --- post-processor/vagrant/post-processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 18357923f..f3bb6e346 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -26,7 +26,7 @@ var builtins = map[string]string{ "mitchellh.vmware-esx": "vmware", "pearkes.digitalocean": "digitalocean", "packer.googlecompute": "google", - "pearkes.scaleway": "scaleway", + "hashicorp.scaleway": "scaleway", "packer.parallels": "parallels", "MSOpenTech.hyperv": "hyperv", "transcend.qemu": "libvirt", From 520433c0b8ea5f9fc123e620109933bada6c9351 Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Tue, 11 Jul 2017 14:06:53 +0200 Subject: [PATCH 219/251] Cleanup documentation --- website/source/docs/builders/scaleway.html.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index e045f27bd..efa26dbee 100644 --- a/website/source/docs/builders/scaleway.html.md +++ 
b/website/source/docs/builders/scaleway.html.md @@ -48,7 +48,7 @@ builder. - `image` (string) - The UUID of the base image to use. This is the image that will be used to launch a new server and provision it. See - [https://api-marketplace.scaleway.com/images](https://api-marketplace.scaleway.com/images) to + [https://api-marketplace.scaleway.com/images](https://api-marketplace.scaleway.com/images) get the complete list of the accepted image UUID. - `region` (string) - The name of the region to launch the @@ -66,7 +66,7 @@ builder. appear in your account. Default `packer-TIMESTAMP` - `snapshot_name` (string) - The name of the resulting snapshot that will - appear in your account. Default `packer-TIMESTAMP + appear in your account. Default `packer-TIMESTAMP` ## Basic Example @@ -82,8 +82,7 @@ access tokens: "region": "par1", "commercial_type": "X64-2GB", "ssh_username": "root", - "ssh_private_key_file": "~/.ssh/id_rsa", - Extra, + "ssh_private_key_file": "~/.ssh/id_rsa" } ``` From b44798b38deb5a9ac86e3e153269d36587c7ba4d Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Tue, 11 Jul 2017 16:15:00 +0200 Subject: [PATCH 220/251] Raise error in case of create server failure --- builder/scaleway/step_create_server.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go index 3cf46e1ed..11fe284ab 100644 --- a/builder/scaleway/step_create_server.go +++ b/builder/scaleway/step_create_server.go @@ -34,10 +34,17 @@ func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { Tags: tags, }) + if err != nil { + err := fmt.Errorf("Error creating server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + err = client.PostServerAction(server, "poweron") if err != nil { - err := fmt.Errorf("Error creating server: %s", err) + err := fmt.Errorf("Error starting server: %s", err) state.Put("error", err) 
ui.Error(err.Error()) return multistep.ActionHalt @@ -51,7 +58,7 @@ func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { } func (s *stepCreateServer) Cleanup(state multistep.StateBag) { - if s.serverID != "" { + if s.serverID == "" { return } From eb56b1b70e38b3c0dd3f0850321a3ab0822cd37c Mon Sep 17 00:00:00 2001 From: Edouard BONLIEU Date: Fri, 21 Jul 2017 12:26:20 +0200 Subject: [PATCH 221/251] Fix terminate error --- builder/scaleway/builder.go | 7 ++++-- builder/scaleway/step_create_server.go | 5 +++- builder/scaleway/step_terminate.go | 34 -------------------------- 3 files changed, 9 insertions(+), 37 deletions(-) delete mode 100644 builder/scaleway/step_terminate.go diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index 14fee93a0..f2266576e 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -33,7 +33,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - client, _ := api.NewScalewayAPI(b.config.Organization, b.config.Token, b.config.UserAgent, b.config.Region) + client, err := api.NewScalewayAPI(b.config.Organization, b.config.Token, b.config.UserAgent, b.config.Region) + + if err != nil { + return nil, err + } state := new(multistep.BasicStateBag) state.Put("config", b.config) @@ -58,7 +62,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(stepShutdown), new(stepSnapshot), new(stepImage), - new(stepTerminate), } b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go index 11fe284ab..ddd06fdf5 100644 --- a/builder/scaleway/step_create_server.go +++ b/builder/scaleway/step_create_server.go @@ -66,9 +66,12 @@ func (s *stepCreateServer) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Destroying 
server...") - err := client.PostServerAction(s.serverID, "terminate") + + err := client.DeleteServerForce(s.serverID) + if err != nil { ui.Error(fmt.Sprintf( "Error destroying server. Please destroy it manually: %s", err)) } + } diff --git a/builder/scaleway/step_terminate.go b/builder/scaleway/step_terminate.go deleted file mode 100644 index 154750931..000000000 --- a/builder/scaleway/step_terminate.go +++ /dev/null @@ -1,34 +0,0 @@ -package scaleway - -import ( - "fmt" - - "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -type stepTerminate struct{} - -func (s *stepTerminate) Run(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(*api.ScalewayAPI) - ui := state.Get("ui").(packer.Ui) - serverID := state.Get("server_id").(string) - - ui.Say("Terminating server...") - - err := client.DeleteServerForce(serverID) - - if err != nil { - err := fmt.Errorf("Error terminating server: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - return multistep.ActionContinue -} - -func (s *stepTerminate) Cleanup(state multistep.StateBag) { - // no cleanup -} From fc7d89eb79887f21cdb67a1e97f3e6e848134ce0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Loi=CC=88c=20Carr?= Date: Fri, 5 Jan 2018 09:43:52 +0100 Subject: [PATCH 222/251] builder/scaleway: support password auth --- builder/scaleway/ssh.go | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/builder/scaleway/ssh.go b/builder/scaleway/ssh.go index 017138022..d1ac5dfc8 100644 --- a/builder/scaleway/ssh.go +++ b/builder/scaleway/ssh.go @@ -2,9 +2,10 @@ package scaleway import ( "fmt" - "golang.org/x/crypto/ssh" + packerssh "github.com/hashicorp/packer/communicator/ssh" "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" ) func commHost(state multistep.StateBag) (string, error) { @@ -14,17 +15,32 @@ func commHost(state 
multistep.StateBag) (string, error) { func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { config := state.Get("config").(Config) - privateKey := state.Get("privateKey").(string) + var privateKey string - signer, err := ssh.ParsePrivateKey([]byte(privateKey)) - if err != nil { - return nil, fmt.Errorf("Error setting up SSH config: %s", err) + var auth []ssh.AuthMethod + + if config.Comm.SSHPassword != "" { + auth = []ssh.AuthMethod{ + ssh.Password(config.Comm.SSHPassword), + ssh.KeyboardInteractive( + packerssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), + } + } + + if config.Comm.SSHPrivateKey != "" { + if priv, ok := state.GetOk("privateKey"); ok { + privateKey = priv.(string) + } + signer, err := ssh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + auth = append(auth, ssh.PublicKeys(signer)) } return &ssh.ClientConfig{ - User: config.Comm.SSHUsername, - Auth: []ssh.AuthMethod{ - ssh.PublicKeys(signer), - }, + User: config.Comm.SSHUsername, + Auth: auth, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), }, nil } From 7f8ed28bc6d89a8ce24e382caa09ffacc6a48288 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Loi=CC=88c=20Carr?= Date: Fri, 5 Jan 2018 09:57:59 +0100 Subject: [PATCH 223/251] builder/scaleway: Make use of NewRunnerWithPauseFn --- builder/scaleway/builder.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index f2266576e..266b9e41b 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -4,6 +4,7 @@ package scaleway import ( + "errors" "fmt" "log" @@ -64,16 +65,24 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(stepImage), } - b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) + b.runner = common.NewRunnerWithPauseFn(steps, b.config.PackerConfig, ui, state) b.runner.Run(state) if rawErr, ok := 
state.GetOk("error"); ok { return nil, rawErr.(error) } + // If we were interrupted or cancelled, then just exit. + if _, ok := state.GetOk(multistep.StateCancelled); ok { + return nil, errors.New("Build was cancelled.") + } + + if _, ok := state.GetOk(multistep.StateHalted); ok { + return nil, errors.New("Build was halted.") + } + if _, ok := state.GetOk("snapshot_name"); !ok { - log.Println("Failed to find snapshot_name in state. Bug?") - return nil, nil + return nil, errors.New("Cannot find snapshot_name in state.") } artifact := &Artifact{ From 1f7c32db98036f1c149813b0c2f5165928350cbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Loi=CC=88c=20Carr?= Date: Fri, 5 Jan 2018 10:07:03 +0100 Subject: [PATCH 224/251] builder/scaleway: report to ui scw api startup error --- builder/scaleway/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index 266b9e41b..8c5d79fde 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -37,6 +37,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe client, err := api.NewScalewayAPI(b.config.Organization, b.config.Token, b.config.UserAgent, b.config.Region) if err != nil { + ui.Error(err.Error()) return nil, err } From 22b12432db290af7589dcdcd49eba39c95470c61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Loi=CC=88c=20Carr?= Date: Sat, 6 Jan 2018 14:56:46 +0100 Subject: [PATCH 225/251] builder/scaleway: support ssh agent authentication --- builder/scaleway/ssh.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/builder/scaleway/ssh.go b/builder/scaleway/ssh.go index d1ac5dfc8..2e566936e 100644 --- a/builder/scaleway/ssh.go +++ b/builder/scaleway/ssh.go @@ -2,10 +2,13 @@ package scaleway import ( "fmt" + "net" + "os" packerssh "github.com/hashicorp/packer/communicator/ssh" "github.com/mitchellh/multistep" "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" ) func commHost(state 
multistep.StateBag) (string, error) { @@ -19,12 +22,27 @@ func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { var auth []ssh.AuthMethod - if config.Comm.SSHPassword != "" { + if config.Comm.SSHAgentAuth { + authSock := os.Getenv("SSH_AUTH_SOCK") + if authSock == "" { + return nil, fmt.Errorf("SSH_AUTH_SOCK is not set") + } + + sshAgent, err := net.Dial("unix", authSock) + if err != nil { + return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err) + } auth = []ssh.AuthMethod{ + ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers), + } + } + + if config.Comm.SSHPassword != "" { + auth = append(auth, ssh.Password(config.Comm.SSHPassword), ssh.KeyboardInteractive( packerssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), - } + ) } if config.Comm.SSHPrivateKey != "" { From e752e3a01847351130f3854d77a3d00ccd5dca4b Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 5 Feb 2018 16:50:32 -0800 Subject: [PATCH 226/251] use new internal multistep helper --- builder/scaleway/builder.go | 2 +- builder/scaleway/ssh.go | 2 +- builder/scaleway/step_create_image.go | 5 +++-- builder/scaleway/step_create_server.go | 5 +++-- builder/scaleway/step_create_ssh_key.go | 5 +++-- builder/scaleway/step_server_info.go | 5 +++-- builder/scaleway/step_shutdown.go | 5 +++-- builder/scaleway/step_snapshot.go | 5 +++-- 8 files changed, 20 insertions(+), 14 deletions(-) diff --git a/builder/scaleway/builder.go b/builder/scaleway/builder.go index 8c5d79fde..457002c74 100644 --- a/builder/scaleway/builder.go +++ b/builder/scaleway/builder.go @@ -10,8 +10,8 @@ import ( "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) diff --git a/builder/scaleway/ssh.go b/builder/scaleway/ssh.go index 2e566936e..a2c9b8f16 100644 --- a/builder/scaleway/ssh.go 
+++ b/builder/scaleway/ssh.go @@ -6,7 +6,7 @@ import ( "os" packerssh "github.com/hashicorp/packer/communicator/ssh" - "github.com/mitchellh/multistep" + "github.com/hashicorp/packer/helper/multistep" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" ) diff --git a/builder/scaleway/step_create_image.go b/builder/scaleway/step_create_image.go index 313f2e421..16345e74d 100644 --- a/builder/scaleway/step_create_image.go +++ b/builder/scaleway/step_create_image.go @@ -1,17 +1,18 @@ package scaleway import ( + "context" "fmt" "log" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) type stepImage struct{} -func (s *stepImage) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) diff --git a/builder/scaleway/step_create_server.go b/builder/scaleway/step_create_server.go index ddd06fdf5..57511d149 100644 --- a/builder/scaleway/step_create_server.go +++ b/builder/scaleway/step_create_server.go @@ -1,11 +1,12 @@ package scaleway import ( + "context" "fmt" "strings" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) @@ -13,7 +14,7 @@ type stepCreateServer struct { serverID string } -func (s *stepCreateServer) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepCreateServer) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) diff --git a/builder/scaleway/step_create_ssh_key.go b/builder/scaleway/step_create_ssh_key.go index dab8f710b..c5405ccd6 100644 --- 
a/builder/scaleway/step_create_ssh_key.go +++ b/builder/scaleway/step_create_ssh_key.go @@ -1,6 +1,7 @@ package scaleway import ( + "context" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -12,8 +13,8 @@ import ( "runtime" "strings" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "golang.org/x/crypto/ssh" ) @@ -23,7 +24,7 @@ type stepCreateSSHKey struct { PrivateKeyFile string } -func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepCreateSSHKey) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) if s.PrivateKeyFile != "" { diff --git a/builder/scaleway/step_server_info.go b/builder/scaleway/step_server_info.go index 38502fe4d..7ab14658f 100644 --- a/builder/scaleway/step_server_info.go +++ b/builder/scaleway/step_server_info.go @@ -1,16 +1,17 @@ package scaleway import ( + "context" "fmt" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) type stepServerInfo struct{} -func (s *stepServerInfo) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepServerInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) serverID := state.Get("server_id").(string) diff --git a/builder/scaleway/step_shutdown.go b/builder/scaleway/step_shutdown.go index b8e45be51..a8a029259 100644 --- a/builder/scaleway/step_shutdown.go +++ b/builder/scaleway/step_shutdown.go @@ -1,16 +1,17 @@ package scaleway import ( + "context" "fmt" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) type stepShutdown struct{} -func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { +func 
(s *stepShutdown) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) serverID := state.Get("server_id").(string) diff --git a/builder/scaleway/step_snapshot.go b/builder/scaleway/step_snapshot.go index 20301a444..fd0dcb593 100644 --- a/builder/scaleway/step_snapshot.go +++ b/builder/scaleway/step_snapshot.go @@ -1,17 +1,18 @@ package scaleway import ( + "context" "fmt" "log" + "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/mitchellh/multistep" "github.com/scaleway/scaleway-cli/pkg/api" ) type stepSnapshot struct{} -func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { +func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*api.ScalewayAPI) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) From 8b7982480fcf5772aed796753eedf5a537885c66 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 5 Feb 2018 16:51:54 -0800 Subject: [PATCH 227/251] fix sidebar placement --- website/source/layouts/docs.erb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 271de572f..5f089e041 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -90,9 +90,6 @@ - > - Scaleway - > CloudStack @@ -165,6 +162,9 @@ > QEMU + > + Scaleway + > Triton From 44647ea185befa9b21668b011d0c17ffe0a212a9 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 5 Feb 2018 16:52:44 -0800 Subject: [PATCH 228/251] add vendor deps --- .../github.com/Sirupsen/logrus/CHANGELOG.md | 94 + vendor/github.com/Sirupsen/logrus/LICENSE | 21 + vendor/github.com/Sirupsen/logrus/README.md | 479 +++ vendor/github.com/Sirupsen/logrus/alt_exit.go | 64 + vendor/github.com/Sirupsen/logrus/doc.go | 26 + vendor/github.com/Sirupsen/logrus/entry.go | 
275 ++ vendor/github.com/Sirupsen/logrus/exported.go | 193 ++ .../github.com/Sirupsen/logrus/formatter.go | 45 + vendor/github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/json_formatter.go | 74 + vendor/github.com/Sirupsen/logrus/logger.go | 308 ++ vendor/github.com/Sirupsen/logrus/logrus.go | 143 + .../Sirupsen/logrus/terminal_bsd.go | 10 + .../Sirupsen/logrus/terminal_linux.go | 14 + .../Sirupsen/logrus/terminal_notwindows.go | 28 + .../Sirupsen/logrus/terminal_solaris.go | 21 + .../Sirupsen/logrus/terminal_windows.go | 33 + .../Sirupsen/logrus/text_formatter.go | 189 ++ vendor/github.com/Sirupsen/logrus/writer.go | 62 + vendor/github.com/creack/goselect/Dockerfile | 5 + vendor/github.com/creack/goselect/LICENSE | 22 + vendor/github.com/creack/goselect/README.md | 30 + vendor/github.com/creack/goselect/fdset.go | 33 + vendor/github.com/creack/goselect/fdset_32.go | 10 + vendor/github.com/creack/goselect/fdset_64.go | 11 + .../github.com/creack/goselect/fdset_doc.go | 93 + .../creack/goselect/fdset_freebsd.go | 33 + .../creack/goselect/fdset_unsupported.go | 20 + .../creack/goselect/fdset_windows.go | 57 + vendor/github.com/creack/goselect/select.go | 28 + .../creack/goselect/select_linux.go | 10 + .../creack/goselect/select_other.go | 9 + .../creack/goselect/select_unsupported.go | 16 + .../creack/goselect/select_windows.go | 13 + .../creack/goselect/test_crosscompile.sh | 17 + .../creack/goselect/zselect_windows.go | 41 + vendor/github.com/docker/docker/LICENSE | 191 ++ vendor/github.com/docker/docker/NOTICE | 19 + .../pkg/namesgenerator/names-generator.go | 608 ++++ .../docker/docker/pkg/random/random.go | 71 + vendor/github.com/dustin/go-humanize/LICENSE | 21 + .../dustin/go-humanize/README.markdown | 92 + vendor/github.com/dustin/go-humanize/big.go | 31 + .../github.com/dustin/go-humanize/bigbytes.go | 173 ++ vendor/github.com/dustin/go-humanize/bytes.go | 143 + vendor/github.com/dustin/go-humanize/comma.go | 108 + 
.../github.com/dustin/go-humanize/commaf.go | 40 + vendor/github.com/dustin/go-humanize/ftoa.go | 23 + .../github.com/dustin/go-humanize/humanize.go | 8 + .../github.com/dustin/go-humanize/number.go | 192 ++ .../github.com/dustin/go-humanize/ordinals.go | 25 + vendor/github.com/dustin/go-humanize/si.go | 113 + vendor/github.com/dustin/go-humanize/times.go | 117 + vendor/github.com/gorilla/websocket/AUTHORS | 8 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 64 + vendor/github.com/gorilla/websocket/client.go | 392 +++ .../gorilla/websocket/client_clone.go | 16 + .../gorilla/websocket/client_clone_legacy.go | 38 + .../gorilla/websocket/compression.go | 148 + vendor/github.com/gorilla/websocket/conn.go | 1149 +++++++ .../github.com/gorilla/websocket/conn_read.go | 18 + .../gorilla/websocket/conn_read_legacy.go | 21 + vendor/github.com/gorilla/websocket/doc.go | 180 ++ vendor/github.com/gorilla/websocket/json.go | 55 + vendor/github.com/gorilla/websocket/mask.go | 55 + .../github.com/gorilla/websocket/prepared.go | 103 + vendor/github.com/gorilla/websocket/server.go | 291 ++ vendor/github.com/gorilla/websocket/util.go | 214 ++ vendor/github.com/moul/anonuuid/LICENSE | 22 + vendor/github.com/moul/anonuuid/Makefile | 73 + vendor/github.com/moul/anonuuid/README.md | 170 + vendor/github.com/moul/anonuuid/anonuuid.go | 229 ++ vendor/github.com/moul/gotty-client/LICENSE | 22 + vendor/github.com/moul/gotty-client/Makefile | 81 + vendor/github.com/moul/gotty-client/README.md | 201 ++ vendor/github.com/moul/gotty-client/arch.go | 34 + .../moul/gotty-client/arch_windows.go | 16 + .../github.com/moul/gotty-client/glide.lock | 37 + .../github.com/moul/gotty-client/glide.yaml | 35 + .../moul/gotty-client/gotty-client.go | 469 +++ .../github.com/renstrom/fuzzysearch/LICENSE | 21 + .../renstrom/fuzzysearch/fuzzy/fuzzy.go | 167 + .../renstrom/fuzzysearch/fuzzy/levenshtein.go | 43 + .../scaleway/scaleway-cli/LICENSE.md | 22 + 
.../scaleway/scaleway-cli/pkg/api/README.md | 25 + .../scaleway/scaleway-cli/pkg/api/api.go | 2754 +++++++++++++++++ .../scaleway/scaleway-cli/pkg/api/cache.go | 814 +++++ .../scaleway/scaleway-cli/pkg/api/helpers.go | 685 ++++ .../scaleway/scaleway-cli/pkg/api/logger.go | 77 + .../scaleway-cli/pkg/sshcommand/sshcommand.go | 124 + .../scaleway/scaleway-cli/pkg/utils/quiet.go | 30 + .../scaleway/scaleway-cli/pkg/utils/utils.go | 253 ++ .../x/crypto/ssh/terminal/terminal.go | 951 ++++++ .../golang.org/x/crypto/ssh/terminal/util.go | 119 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 11 + .../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 73 + .../x/crypto/ssh/terminal/util_windows.go | 155 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 67 + 103 files changed, 15209 insertions(+) create mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 
vendor/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/creack/goselect/Dockerfile create mode 100644 vendor/github.com/creack/goselect/LICENSE create mode 100644 vendor/github.com/creack/goselect/README.md create mode 100644 vendor/github.com/creack/goselect/fdset.go create mode 100644 vendor/github.com/creack/goselect/fdset_32.go create mode 100644 vendor/github.com/creack/goselect/fdset_64.go create mode 100644 vendor/github.com/creack/goselect/fdset_doc.go create mode 100644 vendor/github.com/creack/goselect/fdset_freebsd.go create mode 100644 vendor/github.com/creack/goselect/fdset_unsupported.go create mode 100644 vendor/github.com/creack/goselect/fdset_windows.go create mode 100644 vendor/github.com/creack/goselect/select.go create mode 100644 vendor/github.com/creack/goselect/select_linux.go create mode 100644 vendor/github.com/creack/goselect/select_other.go create mode 100644 vendor/github.com/creack/goselect/select_unsupported.go create mode 100644 vendor/github.com/creack/goselect/select_windows.go create mode 100755 vendor/github.com/creack/goselect/test_crosscompile.sh create mode 100644 vendor/github.com/creack/goselect/zselect_windows.go create mode 100644 vendor/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/docker/docker/pkg/random/random.go create mode 100644 vendor/github.com/dustin/go-humanize/LICENSE create mode 100644 vendor/github.com/dustin/go-humanize/README.markdown create mode 100644 vendor/github.com/dustin/go-humanize/big.go create mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go create mode 100644 
vendor/github.com/dustin/go-humanize/bytes.go create mode 100644 vendor/github.com/dustin/go-humanize/comma.go create mode 100644 vendor/github.com/dustin/go-humanize/commaf.go create mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go create mode 100644 vendor/github.com/dustin/go-humanize/humanize.go create mode 100644 vendor/github.com/dustin/go-humanize/number.go create mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go create mode 100644 vendor/github.com/dustin/go-humanize/si.go create mode 100644 vendor/github.com/dustin/go-humanize/times.go create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/conn_read.go create mode 100644 vendor/github.com/gorilla/websocket/conn_read_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/moul/anonuuid/LICENSE create mode 100644 vendor/github.com/moul/anonuuid/Makefile create mode 100644 vendor/github.com/moul/anonuuid/README.md create mode 100644 vendor/github.com/moul/anonuuid/anonuuid.go create mode 100644 vendor/github.com/moul/gotty-client/LICENSE create mode 100644 
vendor/github.com/moul/gotty-client/Makefile create mode 100644 vendor/github.com/moul/gotty-client/README.md create mode 100644 vendor/github.com/moul/gotty-client/arch.go create mode 100644 vendor/github.com/moul/gotty-client/arch_windows.go create mode 100644 vendor/github.com/moul/gotty-client/glide.lock create mode 100644 vendor/github.com/moul/gotty-client/glide.yaml create mode 100644 vendor/github.com/moul/gotty-client/gotty-client.go create mode 100644 vendor/github.com/renstrom/fuzzysearch/LICENSE create mode 100644 vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go create mode 100644 vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/LICENSE.md create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 
vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..747e4d89a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,94 @@ +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose 
`LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..c32287611 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,479 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) + +**Seeing weird case-sensitive problems?** See [this +issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021). +This change has been reverted. I apologize for causing this. I greatly +underestimated the impact this would have. Logrus strives for stability and +backwards compatibility and failed to provide that. + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. 
The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. 
For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. 
| +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to 
[Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. 
| +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. 
+ +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +logger, hook := NewNullLogger() +logger.Error("Hello error") + +assert.Equal(1, len(hook.Entries)) +assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) +assert.Equal("Hello error", hook.LastEntry().Message) + +hook.Reset() +assert.Nil(hook.LastEntry()) +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... 
+``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8af90637a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 000000000..dddd5f877 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..4edbe7a2d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,275 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. 
+ Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..9a0120ac1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. 
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) 
+} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..b5fbe934d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. 
It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. 
+type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..266554e9f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyLevel: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..b769f3d35 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,308 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. 
The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. 
+func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..e59669111 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
+func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. 
+ DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..5f6be4d3c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin 
freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..308160ca8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..190297abf --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,28 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal(f io.Writer) bool { + var termios Termios + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 000000000..3c86b1abe --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..05d2f91f1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,33 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..ba8885406 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,189 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // QuoteCharacter can be set to the override the default quoting character " + // with something else. For example: ', or `. 
+ QuoteCharacter string + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + 
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !f.needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) + } + case error: + errmsg := value.Error() + if !f.needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..7bdebedc6 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case 
DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/creack/goselect/Dockerfile b/vendor/github.com/creack/goselect/Dockerfile new file mode 100644 index 000000000..d03b5a9d9 --- /dev/null +++ b/vendor/github.com/creack/goselect/Dockerfile @@ -0,0 +1,5 @@ +FROM google/golang:stable +MAINTAINER Guillaume J. Charmes +CMD /tmp/a.out +ADD . /src +RUN cd /src && go build -o /tmp/a.out diff --git a/vendor/github.com/creack/goselect/LICENSE b/vendor/github.com/creack/goselect/LICENSE new file mode 100644 index 000000000..95c600af1 --- /dev/null +++ b/vendor/github.com/creack/goselect/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Guillaume J. 
Charmes + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/creack/goselect/README.md b/vendor/github.com/creack/goselect/README.md new file mode 100644 index 000000000..d5d06510e --- /dev/null +++ b/vendor/github.com/creack/goselect/README.md @@ -0,0 +1,30 @@ +# go-select + +select(2) implementation in Go + +## Supported platforms + +| | 386 | amd64 | arm | arm64 | +|---------------|-----|-------|-----|-------| +| **linux** | yes | yes | yes | yes | +| **darwin** | yes | yes | n/a | ?? | +| **freebsd** | yes | yes | yes | ?? | +| **openbsd** | yes | yes | yes | ?? | +| **netbsd** | yes | yes | yes | ?? | +| **dragonfly** | n/a | yes | n/a | ?? | +| **solaris** | n/a | no | n/a | ?? | +| **plan9** | no | no | n/a | ?? | +| **windows** | yes | yes | n/a | ?? | +| **android** | n/a | n/a | no | ?? | + +*n/a: platform not supported by Go + +Go on `plan9` and `solaris` do not implement `syscall.Select` nor `syscall.SYS_SELECT`. 
+ +## Cross compile + +Using davecheney's https://github.com/davecheney/golang-crosscompile + +``` +export PLATFORMS="darwin/386 darwin/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm windows/386 windows/amd64 openbsd/386 openbsd/amd64 netbsd/386 netbsd/amd64 dragonfly/amd64 plan9/386 plan9/amd64 solaris/amd64" +``` diff --git a/vendor/github.com/creack/goselect/fdset.go b/vendor/github.com/creack/goselect/fdset.go new file mode 100644 index 000000000..2ff3f6c7a --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset.go @@ -0,0 +1,33 @@ +// +build !freebsd,!windows,!plan9 + +package goselect + +import "syscall" + +const FD_SETSIZE = syscall.FD_SETSIZE + +// FDSet wraps syscall.FdSet with convenience methods +type FDSet syscall.FdSet + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + fds.Bits[fd/NFDBITS] |= (1 << (fd % NFDBITS)) +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + fds.Bits[fd/NFDBITS] &^= (1 << (fd % NFDBITS)) +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + return fds.Bits[fd/NFDBITS]&(1<<(fd%NFDBITS)) != 0 +} + +// Keep a null set to avoid reinstantiation +var nullFdSet = &FDSet{} + +// Zero empties the Set +func (fds *FDSet) Zero() { + copy(fds.Bits[:], (nullFdSet).Bits[:]) +} diff --git a/vendor/github.com/creack/goselect/fdset_32.go b/vendor/github.com/creack/goselect/fdset_32.go new file mode 100644 index 000000000..8b90d21a9 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_32.go @@ -0,0 +1,10 @@ +// +build darwin openbsd netbsd 386 arm + +package goselect + +// darwin, netbsd and openbsd uses uint32 on both amd64 and 386 + +const ( + // NFDBITS is the amount of bits per mask + NFDBITS = 4 * 8 +) diff --git a/vendor/github.com/creack/goselect/fdset_64.go b/vendor/github.com/creack/goselect/fdset_64.go new file mode 100644 index 000000000..4e032e4ea --- /dev/null +++ 
b/vendor/github.com/creack/goselect/fdset_64.go @@ -0,0 +1,11 @@ +// +build !darwin,!netbsd,!openbsd +// +build amd64 arm64 + +package goselect + +// darwin, netbsd and openbsd uses uint32 on both amd64 and 386 + +const ( + // NFDBITS is the amount of bits per mask + NFDBITS = 8 * 8 +) diff --git a/vendor/github.com/creack/goselect/fdset_doc.go b/vendor/github.com/creack/goselect/fdset_doc.go new file mode 100644 index 000000000..d9f15a1ce --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_doc.go @@ -0,0 +1,93 @@ +package goselect + +/** +From: XCode's MacOSX10.10.sdk/System/Library/Frameworks/Kernel.framework/Versions/A/Headers/sys/select.h +-- +// darwin/amd64 / 386 +sizeof(__int32_t) == 4 +-- + +typedef __int32_t __fd_mask; + +#define FD_SETSIZE 1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) +#define __howmany(x, y) ((((x) % (y)) == 0) ? ((x) / (y)) : (((x) / (y)) + 1)) + +typedef struct fd_set { + __fd_mask fds_bits[__howmany(__FD_SETSIZE, __NFDBITS)]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/i386-linux-gnu/sys/select.h +-- +// linux/i686 +sizeof(long int) == 4 +-- + +typedef long int __fd_mask; + +#define FD_SETSIZE 1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) + + +typedef struct fd_set { + __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/x86_64-linux-gnu/sys/select.h +-- +// linux/amd64 +sizeof(long int) == 8 +-- + +typedef long int __fd_mask; + +#define FD_SETSIZE 
1024 +#define __NFDBITS (sizeof(__fd_mask) * 8) + + +typedef struct fd_set { + __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ + +/** +From: /usr/include/sys/select.h +-- +// freebsd/amd64 +sizeof(unsigned long) == 8 +-- + +typedef unsigned long __fd_mask; + +#define FD_SETSIZE 1024U +#define __NFDBITS (sizeof(__fd_mask) * 8) +#define _howmany(x, y) (((x) + ((y) - 1)) / (y)) + +typedef struct fd_set { + __fd_mask fds_bits[_howmany(FD_SETSIZE, __NFDBITS)]; +} fd_set; + +#define __FD_MASK(n) ((__fd_mask)1 << ((n) % __NFDBITS)) +#define FD_SET(n, p) ((p)->fds_bits[(n)/__NFDBITS] |= __FD_MASK(n)) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/__NFDBITS] &= ~__FD_MASK(n)) +#define FD_ISSET(n, p) (((p)->fds_bits[(n)/__NFDBITS] & __FD_MASK(n)) != 0) +*/ diff --git a/vendor/github.com/creack/goselect/fdset_freebsd.go b/vendor/github.com/creack/goselect/fdset_freebsd.go new file mode 100644 index 000000000..03f7b9127 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_freebsd.go @@ -0,0 +1,33 @@ +// +build freebsd + +package goselect + +import "syscall" + +const FD_SETSIZE = syscall.FD_SETSIZE + +// FDSet wraps syscall.FdSet with convenience methods +type FDSet syscall.FdSet + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + fds.X__fds_bits[fd/NFDBITS] |= (1 << (fd % NFDBITS)) +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + fds.X__fds_bits[fd/NFDBITS] &^= (1 << (fd % NFDBITS)) +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + return fds.X__fds_bits[fd/NFDBITS]&(1<<(fd%NFDBITS)) != 0 +} + +// Keep a null set to avoid reinstantiation +var nullFdSet = &FDSet{} + +// Zero empties the Set +func (fds 
*FDSet) Zero() { + copy(fds.X__fds_bits[:], (nullFdSet).X__fds_bits[:]) +} diff --git a/vendor/github.com/creack/goselect/fdset_unsupported.go b/vendor/github.com/creack/goselect/fdset_unsupported.go new file mode 100644 index 000000000..cbd8d5880 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_unsupported.go @@ -0,0 +1,20 @@ +// +build plan9 + +package goselect + +const FD_SETSIZE = 0 + +// FDSet wraps syscall.FdSet with convenience methods +type FDSet struct{} + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) {} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) {} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { return false } + +// Zero empties the Set +func (fds *FDSet) Zero() {} diff --git a/vendor/github.com/creack/goselect/fdset_windows.go b/vendor/github.com/creack/goselect/fdset_windows.go new file mode 100644 index 000000000..ee3491975 --- /dev/null +++ b/vendor/github.com/creack/goselect/fdset_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package goselect + +import "syscall" + +const FD_SETSIZE = 64 + +// FDSet extracted from mingw libs source code +type FDSet struct { + fd_count uint + fd_array [FD_SETSIZE]uintptr +} + +// Set adds the fd to the set +func (fds *FDSet) Set(fd uintptr) { + var i uint + for i = 0; i < fds.fd_count; i++ { + if fds.fd_array[i] == fd { + break + } + } + if i == fds.fd_count { + if fds.fd_count < FD_SETSIZE { + fds.fd_array[i] = fd + fds.fd_count++ + } + } +} + +// Clear remove the fd from the set +func (fds *FDSet) Clear(fd uintptr) { + var i uint + for i = 0; i < fds.fd_count; i++ { + if fds.fd_array[i] == fd { + for i < fds.fd_count-1 { + fds.fd_array[i] = fds.fd_array[i+1] + i++ + } + fds.fd_count-- + break + } + } +} + +// IsSet check if the given fd is set +func (fds *FDSet) IsSet(fd uintptr) bool { + if isset, err := __WSAFDIsSet(syscall.Handle(fd), fds); err == nil && isset != 0 { + return true + } + return false +} + +// 
Zero empties the Set +func (fds *FDSet) Zero() { + fds.fd_count = 0 +} diff --git a/vendor/github.com/creack/goselect/select.go b/vendor/github.com/creack/goselect/select.go new file mode 100644 index 000000000..7f2875e73 --- /dev/null +++ b/vendor/github.com/creack/goselect/select.go @@ -0,0 +1,28 @@ +package goselect + +import ( + "syscall" + "time" +) + +// Select wraps syscall.Select with Go types +func Select(n int, r, w, e *FDSet, timeout time.Duration) error { + var timeval *syscall.Timeval + if timeout >= 0 { + t := syscall.NsecToTimeval(timeout.Nanoseconds()) + timeval = &t + } + + return sysSelect(n, r, w, e, timeval) +} + +// RetrySelect wraps syscall.Select with Go types, and retries a number of times, with a given retryDelay. +func RetrySelect(n int, r, w, e *FDSet, timeout time.Duration, retries int, retryDelay time.Duration) (err error) { + for i := 0; i < retries; i++ { + if err = Select(n, r, w, e, timeout); err != syscall.EINTR { + return err + } + time.Sleep(retryDelay) + } + return err +} diff --git a/vendor/github.com/creack/goselect/select_linux.go b/vendor/github.com/creack/goselect/select_linux.go new file mode 100644 index 000000000..acd569e9d --- /dev/null +++ b/vendor/github.com/creack/goselect/select_linux.go @@ -0,0 +1,10 @@ +// +build linux + +package goselect + +import "syscall" + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + _, err := syscall.Select(n, (*syscall.FdSet)(r), (*syscall.FdSet)(w), (*syscall.FdSet)(e), timeout) + return err +} diff --git a/vendor/github.com/creack/goselect/select_other.go b/vendor/github.com/creack/goselect/select_other.go new file mode 100644 index 000000000..6c8208147 --- /dev/null +++ b/vendor/github.com/creack/goselect/select_other.go @@ -0,0 +1,9 @@ +// +build !linux,!windows,!plan9,!solaris + +package goselect + +import "syscall" + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + return syscall.Select(n, (*syscall.FdSet)(r), (*syscall.FdSet)(w), 
(*syscall.FdSet)(e), timeout) +} diff --git a/vendor/github.com/creack/goselect/select_unsupported.go b/vendor/github.com/creack/goselect/select_unsupported.go new file mode 100644 index 000000000..bea0ad94a --- /dev/null +++ b/vendor/github.com/creack/goselect/select_unsupported.go @@ -0,0 +1,16 @@ +// +build plan9 solaris + +package goselect + +import ( + "fmt" + "runtime" + "syscall" +) + +// ErrUnsupported . +var ErrUnsupported = fmt.Errorf("Platofrm %s/%s unsupported", runtime.GOOS, runtime.GOARCH) + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + return ErrUnsupported +} diff --git a/vendor/github.com/creack/goselect/select_windows.go b/vendor/github.com/creack/goselect/select_windows.go new file mode 100644 index 000000000..0fb70d5d2 --- /dev/null +++ b/vendor/github.com/creack/goselect/select_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package goselect + +import "syscall" + +//sys _select(nfds int, readfds *FDSet, writefds *FDSet, exceptfds *FDSet, timeout *syscall.Timeval) (total int, err error) = ws2_32.select +//sys __WSAFDIsSet(handle syscall.Handle, fdset *FDSet) (isset int, err error) = ws2_32.__WSAFDIsSet + +func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) error { + _, err := _select(n, r, w, e, timeout) + return err +} diff --git a/vendor/github.com/creack/goselect/test_crosscompile.sh b/vendor/github.com/creack/goselect/test_crosscompile.sh new file mode 100755 index 000000000..533ca6647 --- /dev/null +++ b/vendor/github.com/creack/goselect/test_crosscompile.sh @@ -0,0 +1,17 @@ +export GOOS=linux; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=arm64; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=linux; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=arm64; echo $GOOS/$GOARCH; go build +export 
GOOS=darwin; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=darwin; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=freebsd; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=openbsd; export GOARCH=386; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=arm; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=amd64; echo $GOOS/$GOARCH; go build +export GOOS=netbsd; export GOARCH=386; echo $GOOS/$GOARCH; go build diff --git a/vendor/github.com/creack/goselect/zselect_windows.go b/vendor/github.com/creack/goselect/zselect_windows.go new file mode 100644 index 000000000..c3b10e1d0 --- /dev/null +++ b/vendor/github.com/creack/goselect/zselect_windows.go @@ -0,0 +1,41 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package goselect + +import "unsafe" +import "syscall" + +var _ unsafe.Pointer + +var ( + modws2_32 = syscall.NewLazyDLL("ws2_32.dll") + + procselect = modws2_32.NewProc("select") + proc__WSAFDIsSet = modws2_32.NewProc("__WSAFDIsSet") +) + +func _select(nfds int, readfds *FDSet, writefds *FDSet, exceptfds *FDSet, timeout *syscall.Timeval) (total int, err error) { + r0, _, e1 := syscall.Syscall6(procselect.Addr(), 5, uintptr(nfds), uintptr(unsafe.Pointer(readfds)), uintptr(unsafe.Pointer(writefds)), uintptr(unsafe.Pointer(exceptfds)), uintptr(unsafe.Pointer(timeout)), 0) + total = int(r0) + if total == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func __WSAFDIsSet(handle syscall.Handle, fdset *FDSet) (isset int, err error) { + r0, _, e1 := syscall.Syscall(proc__WSAFDIsSet.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(fdset)), 0) + isset = int(r0) + 
if isset == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 000000000..0c74e15b0 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 000000000..d732a4795 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,608 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "vigorous", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. 
https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. 
https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. 
- https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. 
https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. 
https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. 
https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. 
https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. 
https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. 
The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. 
https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. 
https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. 
- https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. 
- https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. 
https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. 
https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. 
https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. 
If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go new file mode 100644 index 000000000..70de4d130 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. 
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 000000000..8d9a94a90 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 000000000..f69d3c870 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,92 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. 
+``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 000000000..f49dc337d --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 000000000..1a2bf6172 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,173 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = 
(&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, 
+ "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 000000000..0b498f488 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. 
+const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 000000000..eb285cb95 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,108 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // minin64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. 
Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. +func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 000000000..620690dec --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,40 @@ +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. 
+func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 000000000..c76190b10 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,23 @@ +package humanize + +import "strconv" + +func stripTrailingZeros(s string) string { + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 000000000..a2c2da31e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). 
+*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 000000000..dec618659 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < -math.MaxFloat64 { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." 
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for 
i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 000000000..43d88a861 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 000000000..b24e48169 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,113 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. 
ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 000000000..b311f11c8 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. 
+// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. 
+// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. +func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D >= diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..b003eca0c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..33c3d2be3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
+ +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..43a87c753 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,392 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. 
+// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. 
+ Jar http.CookieJar +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. +// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + + if i := strings.Index(s, "/"); i >= 0 { + u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" + } + + u.Host = s + + if strings.Contains(u.Host, "@") { + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. + return nil, errMalformedURL + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {

	// A nil receiver behaves like DefaultDialer.
	if d == nil {
		d = &Dialer{
			Proxy: http.ProxyFromEnvironment,
		}
	}

	challengeKey, err := generateChallengeKey()
	if err != nil {
		return nil, nil, err
	}

	u, err := parseURL(urlStr)
	if err != nil {
		return nil, nil, err
	}

	// Map the ws/wss scheme onto http/https for the handshake request.
	switch u.Scheme {
	case "ws":
		u.Scheme = "http"
	case "wss":
		u.Scheme = "https"
	default:
		return nil, nil, errMalformedURL
	}

	if u.User != nil {
		// User name and password are not allowed in websocket URIs.
		return nil, nil, errMalformedURL
	}

	req := &http.Request{
		Method:     "GET",
		URL:        u,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       u.Host,
	}

	// Set the cookies present in the cookie jar of the dialer.
	if d.Jar != nil {
		for _, cookie := range d.Jar.Cookies(u) {
			req.AddCookie(cookie)
		}
	}

	// Set the request headers using the capitalization for names and values in
	// RFC examples. Although the capitalization shouldn't matter, there are
	// servers that depend on it. The Header.Set method is not used because the
	// method canonicalizes the header names.
	req.Header["Upgrade"] = []string{"websocket"}
	req.Header["Connection"] = []string{"Upgrade"}
	req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
	req.Header["Sec-WebSocket-Version"] = []string{"13"}
	if len(d.Subprotocols) > 0 {
		req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
	}
	// Merge caller-supplied headers, rejecting any that would collide with
	// the handshake headers set above.
	for k, vs := range requestHeader {
		switch {
		case k == "Host":
			if len(vs) > 0 {
				req.Host = vs[0]
			}
		case k == "Upgrade" ||
			k == "Connection" ||
			k == "Sec-Websocket-Key" ||
			k == "Sec-Websocket-Version" ||
			k == "Sec-Websocket-Extensions" ||
			(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
			return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
		default:
			req.Header[k] = vs
		}
	}

	if d.EnableCompression {
		req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
	}

	hostPort, hostNoPort := hostPortNoPort(u)

	var proxyURL *url.URL
	// Check whether the proxy method has been configured.
	if d.Proxy != nil {
		proxyURL, err = d.Proxy(req)
	}
	if err != nil {
		return nil, nil, err
	}

	// Dial the proxy when one is configured, otherwise the target itself.
	var targetHostPort string
	if proxyURL != nil {
		targetHostPort, _ = hostPortNoPort(proxyURL)
	} else {
		targetHostPort = hostPort
	}

	var deadline time.Time
	if d.HandshakeTimeout != 0 {
		deadline = time.Now().Add(d.HandshakeTimeout)
	}

	netDial := d.NetDial
	if netDial == nil {
		netDialer := &net.Dialer{Deadline: deadline}
		netDial = netDialer.Dial
	}

	netConn, err := netDial("tcp", targetHostPort)
	if err != nil {
		return nil, nil, err
	}

	// Close the connection on any error path; netConn is set to nil just
	// before the successful return to disarm this.
	defer func() {
		if netConn != nil {
			netConn.Close()
		}
	}()

	if err := netConn.SetDeadline(deadline); err != nil {
		return nil, nil, err
	}

	if proxyURL != nil {
		// Tunnel through the proxy with an HTTP CONNECT request.
		connectHeader := make(http.Header)
		if user := proxyURL.User; user != nil {
			proxyUser := user.Username()
			if proxyPassword, passwordSet := user.Password(); passwordSet {
				credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
				connectHeader.Set("Proxy-Authorization", "Basic "+credential)
			}
		}
		connectReq := &http.Request{
			Method: "CONNECT",
			URL:    &url.URL{Opaque: hostPort},
			Host:   hostPort,
			Header: connectHeader,
		}

		// NOTE(review): the Write error is ignored here; a failed write
		// surfaces later as a ReadResponse error, but checking it directly
		// would be clearer.
		connectReq.Write(netConn)

		// Read response.
		// Okay to use and discard buffered reader here, because
		// TLS server will not speak until spoken to.
		br := bufio.NewReader(netConn)
		resp, err := http.ReadResponse(br, connectReq)
		if err != nil {
			return nil, nil, err
		}
		if resp.StatusCode != 200 {
			// NOTE(review): f[1] would index out of range if the proxy's
			// status line contains no space after the code — verify against
			// real proxy responses.
			f := strings.SplitN(resp.Status, " ", 2)
			return nil, nil, errors.New(f[1])
		}
	}

	if u.Scheme == "https" {
		// Wrap the (possibly tunneled) connection in TLS.
		cfg := cloneTLSConfig(d.TLSClientConfig)
		if cfg.ServerName == "" {
			cfg.ServerName = hostNoPort
		}
		tlsConn := tls.Client(netConn, cfg)
		netConn = tlsConn
		if err := tlsConn.Handshake(); err != nil {
			return nil, nil, err
		}
		if !cfg.InsecureSkipVerify {
			if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
				return nil, nil, err
			}
		}
	}

	conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)

	if err := req.Write(netConn); err != nil {
		return nil, nil, err
	}

	resp, err := http.ReadResponse(conn.br, req)
	if err != nil {
		return nil, nil, err
	}

	if d.Jar != nil {
		if rc := resp.Cookies(); len(rc) > 0 {
			d.Jar.SetCookies(u, rc)
		}
	}

	// Validate the handshake response per RFC 6455 section 4.1.
	if resp.StatusCode != 101 ||
		!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
		!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
		resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
		// Before closing the network connection on return from this
		// function, slurp up some of the response to aid application
		// debugging.
		buf := make([]byte, 1024)
		n, _ := io.ReadFull(resp.Body, buf)
		resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
		return nil, resp, ErrBadHandshake
	}

	// Enable per-message compression only when the server agreed to both
	// "no context takeover" directions.
	for _, ext := range parseExtensions(resp.Header) {
		if ext[""] != "permessage-deflate" {
			continue
		}
		_, snct := ext["server_no_context_takeover"]
		_, cnct := ext["client_no_context_takeover"]
		if !snct || !cnct {
			return nil, resp, errInvalidCompression
		}
		conn.newCompressionWriter = compressNoContextTakeover
		conn.newDecompressionReader = decompressNoContextTakeover
		break
	}

	resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
	conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")

	// Clear the handshake deadline; the application manages deadlines from
	// here on.
	netConn.SetDeadline(time.Time{})
	netConn = nil // to avoid close in defer.
	return conn, resp, nil
}

// ---- vendored file boundary: client_clone.go ----

// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig copies a TLS config so the dialer can mutate ServerName
// without racing a config in active use; on Go 1.8+ the standard library's
// Clone does this safely.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}

// ---- vendored file boundary: client_clone_legacy.go ----

// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
//
// NOTE(review): fields introduced in newer Go releases are not copied here;
// that is acceptable only because the build tag restricts this file to
// toolchains older than Go 1.8 — confirm the tag is honored by the build.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	// A nil config clones to a fresh zero-value config.
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}

// ---- vendored file boundary: compression.go ----

// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"compress/flate"
	"errors"
	"io"
	"strings"
	"sync"
)

const (
	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
	maxCompressionLevel     = flate.BestCompression
	defaultCompressionLevel = 1
)

var (
	// One writer pool per compression level; index by level-minCompressionLevel.
	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
	flateReaderPool  = sync.Pool{New: func() interface{} {
		return flate.NewReader(nil)
	}}
)

// decompressNoContextTakeover wraps r in a pooled flate reader suitable for
// reading one permessage-deflate message.
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
	const tail =
	// Add four bytes as specified in RFC
	"\x00\x00\xff\xff" +
		// Add final block to squelch unexpected EOF error from flate reader.
		"\x01\x00\x00\xff\xff"

	fr, _ := flateReaderPool.Get().(io.ReadCloser)
	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
	return &flateReadWrapper{fr}
}

// isValidCompressionLevel reports whether level is within the range accepted
// by compressNoContextTakeover.
func isValidCompressionLevel(level int) bool {
	return minCompressionLevel <= level && level <= maxCompressionLevel
}

// compressNoContextTakeover returns a pooled flate writer over w for one
// message. The caller must Close the returned writer to flush and recycle it.
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
	p := &flateWriterPools[level-minCompressionLevel]
	tw := &truncWriter{w: w}
	fw, _ := p.Get().(*flate.Writer)
	if fw == nil {
		fw, _ = flate.NewWriter(tw, level)
	} else {
		fw.Reset(tw)
	}
	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}

// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer. The retained bytes let Close verify and strip
// the trailing flate sync marker (00 00 ff ff).
type truncWriter struct {
	w io.WriteCloser
	n int     // number of valid bytes buffered in p
	p [4]byte // the last four bytes seen, held back from w
}

func (w *truncWriter) Write(p []byte) (int, error) {
	n := 0

	// fill buffer first for simplicity.
	if w.n < len(w.p) {
		n = copy(w.p[w.n:], p)
		p = p[n:]
		w.n += n
		if len(p) == 0 {
			return n, nil
		}
	}

	m := len(p)
	if m > len(w.p) {
		m = len(w.p)
	}

	// Flush m previously-buffered bytes, then shift the buffer and refill
	// it with the last m bytes of the new data, writing the rest through.
	if nn, err := w.w.Write(w.p[:m]); err != nil {
		return n + nn, err
	}

	copy(w.p[:], w.p[m:])
	copy(w.p[len(w.p)-m:], p[len(p)-m:])
	nn, err := w.w.Write(p[:len(p)-m])
	return n + nn, err
}

type flateWriteWrapper struct {
	fw *flate.Writer
	tw *truncWriter
	p  *sync.Pool // pool to return fw to on Close
}

func (w *flateWriteWrapper) Write(p []byte) (int, error) {
	if w.fw == nil {
		return 0, errWriteClosed
	}
	return w.fw.Write(p)
}

// Close flushes the flate stream, recycles the writer, and checks that the
// held-back tail is exactly the flate sync marker before closing the
// underlying writer.
func (w *flateWriteWrapper) Close() error {
	if w.fw == nil {
		return errWriteClosed
	}
	err1 := w.fw.Flush()
	w.p.Put(w.fw)
	w.fw = nil
	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
	}
	err2 := w.tw.w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

type flateReadWrapper struct {
	fr io.ReadCloser
}

func (r *flateReadWrapper) Read(p []byte) (int, error) {
	if r.fr == nil {
		return 0, io.ErrClosedPipe
	}
	n, err := r.fr.Read(p)
	if err == io.EOF {
		// Preemptively place the reader back in the pool. This helps with
		// scenarios where the application does not call NextReader() soon after
		// this final read.
		r.Close()
	}
	return n, err
}

func (r *flateReadWrapper) Close() error {
	if r.fr == nil {
		return io.ErrClosedPipe
	}
	err := r.fr.Close()
	flateReaderPool.Put(r.fr)
	r.fr = nil
	return err
}

// ---- vendored file boundary: conn.go ----

// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// (conn.go: package websocket; imports: bufio, encoding/binary, errors, io,
// io/ioutil, math/rand, net, strconv, sync, time, unicode/utf8)

const (
	// Frame header byte 0 bits from Section 5.2 of RFC 6455
	finalBit = 1 << 7
	rsv1Bit  = 1 << 6
	rsv2Bit  = 1 << 5
	rsv3Bit  = 1 << 4

	// Frame header byte 1 bits from Section 5.2 of RFC 6455
	maskBit = 1 << 7

	maxFrameHeaderSize         = 2 + 8 + 4 // Fixed header + length + mask
	maxControlFramePayloadSize = 125

	// writeWait is presumably the default write deadline — confirm at call sites.
	writeWait = time.Second

	defaultReadBufferSize  = 4096
	defaultWriteBufferSize = 4096

	continuationFrame = 0
	noFrame           = -1
)

// Close codes defined in RFC 6455, section 11.7.
const (
	CloseNormalClosure           = 1000
	CloseGoingAway               = 1001
	CloseProtocolError           = 1002
	CloseUnsupportedData         = 1003
	CloseNoStatusReceived        = 1005
	CloseAbnormalClosure         = 1006
	CloseInvalidFramePayloadData = 1007
	ClosePolicyViolation         = 1008
	CloseMessageTooBig           = 1009
	CloseMandatoryExtension      = 1010
	CloseInternalServerErr       = 1011
	CloseServiceRestart          = 1012
	CloseTryAgainLater           = 1013
	CloseTLSHandshake            = 1015
)

// The message types are defined in RFC 6455, section 11.8.
const (
	// TextMessage denotes a text data message. The text message payload is
	// interpreted as UTF-8 encoded text data.
	TextMessage = 1

	// BinaryMessage denotes a binary data message.
	BinaryMessage = 2

	// CloseMessage denotes a close control message. The optional message
	// payload contains a numeric code and text. Use the FormatCloseMessage
	// function to format a close message payload.
	CloseMessage = 8

	// PingMessage denotes a ping control message. The optional message payload
	// is UTF-8 encoded text.
	PingMessage = 9

	// PongMessage denotes a pong control message. The optional message payload
	// is UTF-8 encoded text.
	PongMessage = 10
)

// ErrCloseSent is returned when the application writes a message to the
// connection after sending a close message.
var ErrCloseSent = errors.New("websocket: close sent")

// ErrReadLimit is returned when reading a message that is larger than the
// read limit set for the connection.
var ErrReadLimit = errors.New("websocket: read limit exceeded")

// netError satisfies the net Error interface.
type netError struct {
	msg       string
	temporary bool
	timeout   bool
}

func (e *netError) Error() string   { return e.msg }
func (e *netError) Temporary() bool { return e.temporary }
func (e *netError) Timeout() bool   { return e.timeout }

// CloseError represents close frame.
type CloseError struct {

	// Code is defined in RFC 6455, section 11.7.
	Code int

	// Text is the optional text payload.
	Text string
}

// Error formats the close code, a human-readable description for the codes
// known to this switch, and the optional text payload.
//
// NOTE(review): CloseServiceRestart (1012) and CloseTryAgainLater (1013) have
// no description case and render as a bare code; callers may depend on the
// exact strings, so this is left as-is.
func (e *CloseError) Error() string {
	s := []byte("websocket: close ")
	s = strconv.AppendInt(s, int64(e.Code), 10)
	switch e.Code {
	case CloseNormalClosure:
		s = append(s, " (normal)"...)
	case CloseGoingAway:
		s = append(s, " (going away)"...)
	case CloseProtocolError:
		s = append(s, " (protocol error)"...)
	case CloseUnsupportedData:
		s = append(s, " (unsupported data)"...)
	case CloseNoStatusReceived:
		s = append(s, " (no status)"...)
	case CloseAbnormalClosure:
		s = append(s, " (abnormal closure)"...)
	case CloseInvalidFramePayloadData:
		s = append(s, " (invalid payload data)"...)
	case ClosePolicyViolation:
		s = append(s, " (policy violation)"...)
	case CloseMessageTooBig:
		s = append(s, " (message too big)"...)
	case CloseMandatoryExtension:
		s = append(s, " (mandatory extension missing)"...)
	case CloseInternalServerErr:
		s = append(s, " (internal server error)"...)
	case CloseTLSHandshake:
		s = append(s, " (TLS handshake error)"...)
	}
	if e.Text != "" {
		s = append(s, ": "...)
		s = append(s, e.Text...)
	}
	return string(s)
}

// IsCloseError returns boolean indicating whether the error is a *CloseError
// with one of the specified codes.
func IsCloseError(err error, codes ...int) bool {
	if e, ok := err.(*CloseError); ok {
		for _, code := range codes {
			if e.Code == code {
				return true
			}
		}
	}
	return false
}

// IsUnexpectedCloseError returns boolean indicating whether the error is a
// *CloseError with a code not in the list of expected codes.
func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
	if e, ok := err.(*CloseError); ok {
		for _, code := range expectedCodes {
			if e.Code == code {
				return false
			}
		}
		return true
	}
	return false
}

var (
	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
	errBadWriteOpCode      = errors.New("websocket: bad write message type")
	errWriteClosed         = errors.New("websocket: write closed")
	errInvalidControlFrame = errors.New("websocket: invalid control frame")
)

// newMaskKey returns a random 4-byte masking key (RFC 6455 section 5.3).
func newMaskKey() [4]byte {
	n := rand.Uint32()
	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
}

// hideTempErr rewraps temporary net errors so callers cannot retry on them;
// the timeout flag is preserved.
func hideTempErr(err error) error {
	if e, ok := err.(net.Error); ok && e.Temporary() {
		err = &netError{msg: e.Error(), timeout: e.Timeout()}
	}
	return err
}

// isControl reports whether frameType is a control frame type.
func isControl(frameType int) bool {
	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
}

// isData reports whether frameType is a data frame type.
func isData(frameType int) bool {
	return frameType == TextMessage || frameType == BinaryMessage
}

var validReceivedCloseCodes = map[int]bool{
	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number

	CloseNormalClosure:           true,
	CloseGoingAway:               true,
	CloseProtocolError:           true,
	CloseUnsupportedData:         true,
	CloseNoStatusReceived:        false,
	CloseAbnormalClosure:         false,
	CloseInvalidFramePayloadData: true,
	ClosePolicyViolation:         true,
	CloseMessageTooBig:           true,
	CloseMandatoryExtension:      true,
	CloseInternalServerErr:       true,
	CloseServiceRestart:          true,
	CloseTryAgainLater:           true,
	CloseTLSHandshake:            false,
}

// isValidReceivedCloseCode reports whether code may legally appear in a
// received close frame: either a registered code marked valid above, or a
// code from the private/application range 3000-4999.
func isValidReceivedCloseCode(code int) bool {
	return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
}

// The Conn type represents a WebSocket connection.
type Conn struct {
	conn        net.Conn
	isServer    bool
	subprotocol string

	// Write fields
	// mu is a one-slot channel used as a lock; acquiring it via select
	// lets callers (see WriteControl) time out waiting for the lock.
	mu            chan bool  // used as mutex to protect write to conn
	writeBuf      []byte     // frame is constructed in this buffer.
	writeDeadline time.Time
	writer        io.WriteCloser // the current writer returned to the application
	isWriting     bool           // for best-effort concurrent write detection

	writeErrMu sync.Mutex
	writeErr   error

	enableWriteCompression bool
	compressionLevel       int
	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser

	// Read fields
	reader        io.ReadCloser // the current reader returned to the application
	readErr       error
	br            *bufio.Reader
	readRemaining int64 // bytes remaining in current frame.
	readFinal     bool  // true the current message has more frames.
	readLength    int64 // Message size.
	readLimit     int64 // Maximum message size.
	readMaskPos   int
	readMaskKey   [4]byte
	handlePong    func(string) error
	handlePing    func(string) error
	handleClose   func(int, string) error
	readErrCount  int
	messageReader *messageReader // the current low-level reader

	readDecompress         bool // whether last read frame had RSV1 set
	newDecompressionReader func(io.Reader) io.ReadCloser
}

// newConn builds a Conn with freshly allocated buffers.
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
	return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
}

// writeHook captures the byte slice handed to Write so newConnBRW can steal a
// bufio.Writer's internal buffer.
type writeHook struct {
	p []byte
}

func (wh *writeHook) Write(p []byte) (int, error) {
	wh.p = p
	return len(p), nil
}

// newConnBRW builds a Conn, optionally reusing the buffers of a supplied
// bufio.ReadWriter (e.g. from an HTTP hijack) instead of allocating new ones.
func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
	mu := make(chan bool, 1)
	mu <- true

	var br *bufio.Reader
	if readBufferSize == 0 && brw != nil && brw.Reader != nil {
		// Reuse the supplied bufio.Reader if the buffer has a useful size.
		// This code assumes that peek on a reader returns
		// bufio.Reader.buf[:0].
		brw.Reader.Reset(conn)
		if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
			br = brw.Reader
		}
	}
	if br == nil {
		if readBufferSize == 0 {
			readBufferSize = defaultReadBufferSize
		}
		if readBufferSize < maxControlFramePayloadSize {
			// Control frames must fit in the read buffer.
			readBufferSize = maxControlFramePayloadSize
		}
		br = bufio.NewReaderSize(conn, readBufferSize)
	}

	var writeBuf []byte
	if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
		// Use the bufio.Writer's buffer if the buffer has a useful size. This
		// code assumes that bufio.Writer.buf[:1] is passed to the
		// bufio.Writer's underlying writer.
		var wh writeHook
		brw.Writer.Reset(&wh)
		brw.Writer.WriteByte(0)
		brw.Flush()
		if cap(wh.p) >= maxFrameHeaderSize+256 {
			writeBuf = wh.p[:cap(wh.p)]
		}
	}

	if writeBuf == nil {
		if writeBufferSize == 0 {
			writeBufferSize = defaultWriteBufferSize
		}
		writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
	}

	c := &Conn{
		isServer:               isServer,
		br:                     br,
		conn:                   conn,
		mu:                     mu,
		readFinal:              true,
		writeBuf:               writeBuf,
		enableWriteCompression: true,
		compressionLevel:       defaultCompressionLevel,
	}
	// Install the default control-frame handlers.
	c.SetCloseHandler(nil)
	c.SetPingHandler(nil)
	c.SetPongHandler(nil)
	return c
}

// Subprotocol returns the negotiated protocol for the connection.
func (c *Conn) Subprotocol() string {
	return c.subprotocol
}

// Close closes the underlying network connection without sending or waiting for a close frame.
func (c *Conn) Close() error {
	return c.conn.Close()
}

// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

// Write methods

// writeFatal records err as the connection's sticky write error (first error
// wins) after stripping the temporary flag.
func (c *Conn) writeFatal(err error) error {
	err = hideTempErr(err)
	c.writeErrMu.Lock()
	if c.writeErr == nil {
		c.writeErr = err
	}
	c.writeErrMu.Unlock()
	return err
}

// write sends the given buffers as one frame under the write lock, honoring
// the deadline and the sticky write error.
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
	<-c.mu
	defer func() { c.mu <- true }()

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	c.conn.SetWriteDeadline(deadline)
	for _, buf := range bufs {
		if len(buf) > 0 {
			_, err := c.conn.Write(buf)
			if err != nil {
				return c.writeFatal(err)
			}
		}
	}

	// After a close frame is sent, all further writes fail with ErrCloseSent.
	if frameType == CloseMessage {
		c.writeFatal(ErrCloseSent)
	}
	return nil
}

// WriteControl writes a control message with the given deadline. The allowed
// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + return err +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. 
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.fatal(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. 
+ + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. 
+func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := []byte{} + if code != CloseNoStatusReceived { + message = FormatCloseMessage(code, "") + } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING frame application data. The default +// ping handler sends a pong to the peer. 
+// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG frame application data. The default +// pong handler does nothing. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. 
+func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +func FormatCloseMessage(closeCode int, text string) []byte { + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go new file mode 100644 index 000000000..1ea15059e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go new file mode 100644 index 000000000..018541cf6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if len(p) > 0 { + // advance over the bytes just read + io.ReadFull(c.br, p) + } + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..e291a952c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application uses +// the Upgrade function from an Upgrader object with a HTTP request handler +// to get a pointer to a Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// return +// } +// if err = conn.WriteMessage(messageType, p); err != nil { +// return err +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. 
To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by sending a close message to the +// peer and returning a *CloseError from the the NextReader, ReadMessage or the +// message Read method. +// +// Connections handle received ping and pong messages by invoking callback +// functions set with SetPingHandler and SetPongHandler methods. 
The callback +// functions are called from the NextReader, ReadMessage and the message Read +// methods. +// +// The default ping handler sends a pong to the peer. The application's reading +// goroutine can block for a short time while the handler writes the pong data +// to the connection. +// +// The application must read the connection to process ping, pong and close +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and not equal to the +// Host request header. 
+// +// An application can allow connections from any origin by specifying a +// function that always returns true: +// +// var upgrader = websocket.Upgrader{ +// CheckOrigin: func(r *http.Request) bool { return true }, +// } +// +// The deprecated Upgrade function does not enforce an origin policy. It's the +// application's responsibility to check the Origin header before calling +// Upgrade. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..4f0e36875 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..6a88bbc74 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + + // Mask one byte at a time for small buffers. 
+ if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..1efffbd1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. 
+type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..3495e0f1a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,291 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. 
+ ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + var ( + netConn net.Conn + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response 
does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. 
+// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. 
+func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..9a4908df2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,214 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j += 1 + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j += 1 + } + } + return "", "" + } + } + return "", "" +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if strings.EqualFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensiosn parses WebSocket extensions from a header. 
+func parseExtensions(header http.Header) []map[string]string { + + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/moul/anonuuid/LICENSE b/vendor/github.com/moul/anonuuid/LICENSE new file mode 100644 index 000000000..492e2c629 --- /dev/null +++ b/vendor/github.com/moul/anonuuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Manfred Touron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + 
+The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/moul/anonuuid/Makefile b/vendor/github.com/moul/anonuuid/Makefile new file mode 100644 index 000000000..4e2f3bb44 --- /dev/null +++ b/vendor/github.com/moul/anonuuid/Makefile @@ -0,0 +1,73 @@ +# Project-specific variables +BINARIES ?= anonuuid +CONVEY_PORT ?= 9042 + + +# Common variables +SOURCES := $(shell find . -name "*.go") +COMMANDS := $(shell go list ./... | grep -v /vendor/ | grep /cmd/) +PACKAGES := $(shell go list ./... | grep -v /vendor/ | grep -v /cmd/) +GOENV ?= GO15VENDOREXPERIMENT=1 +GO ?= $(GOENV) go +GODEP ?= $(GOENV) godep +USER ?= $(shell whoami) + + +all: build + + +.PHONY: build +build: $(BINARIES) + + +$(BINARIES): $(SOURCES) + $(GO) build -o $@ ./cmd/$@ + + +.PHONY: test +test: + $(GO) get -t . + $(GO) test -v . + + +.PHONY: godep-save +godep-save: + $(GODEP) save $(PACKAGES) $(COMMANDS) + + +.PHONY: clean +clean: + rm -f $(BINARIES) + + +.PHONY: re +re: clean all + + +.PHONY: convey +convey: + $(GO) get github.com/smartystreets/goconvey + goconvey -cover -port=$(CONVEY_PORT) -workDir="$(realpath .)" -depth=1 + + +.PHONY: cover +cover: profile.out + + +profile.out: $(SOURCES) + rm -f $@ + $(GO) test -covermode=count -coverpkg=. -coverprofile=$@ . + + +.PHONY: docker-build +docker-build: + go get github.com/laher/goxc + rm -rf contrib/docker/linux_386 + for binary in $(BINARIES); do \ + goxc -bc="linux,386" -d . 
-pv contrib/docker -n $$binary xc; \ + mv contrib/docker/linux_386/$$binary contrib/docker/entrypoint; \ + docker build -t $(USER)/$$binary contrib/docker; \ + docker run -it --rm $(USER)/$$binary || true; \ + docker inspect --type=image --format="{{ .Id }}" moul/$$binary || true; \ + echo "Now you can run 'docker push $(USER)/$$binary'"; \ + done diff --git a/vendor/github.com/moul/anonuuid/README.md b/vendor/github.com/moul/anonuuid/README.md new file mode 100644 index 000000000..f5c9a57eb --- /dev/null +++ b/vendor/github.com/moul/anonuuid/README.md @@ -0,0 +1,170 @@ +# AnonUUID + +[![Build Status](https://travis-ci.org/moul/anonuuid.svg)](https://travis-ci.org/moul/anonuuid) +[![GoDoc](https://godoc.org/github.com/moul/anonuuid?status.svg)](https://godoc.org/github.com/moul/anonuuid) +[![Coverage Status](https://coveralls.io/repos/moul/anonuuid/badge.svg?branch=master&service=github)](https://coveralls.io/github/moul/anonuuid?branch=master) + +:wrench: Anonymize UUIDs outputs (written in Golang) + +![AnonUUID Logo](https://raw.githubusercontent.com/moul/anonuuid/master/assets/anonuuid.png) + +**anonuuid** anonymize an input string by replacing all UUIDs by an anonymized +new one. + +The fake UUIDs are cached, so if AnonUUID encounter the same real UUIDs multiple +times, the translation will be the same. + +## Usage + +```console +$ anonuuid --help +NAME: + anonuuid - Anonymize UUIDs outputs + +USAGE: + anonuuid [global options] command [command options] [arguments...] 
+ +VERSION: + 1.0.0-dev + +AUTHOR(S): + Manfred Touron + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --hexspeak Generate hexspeak style fake UUIDs + --random, -r Generate random fake UUIDs + --keep-beginning Keep first part of the UUID unchanged + --keep-end Keep last part of the UUID unchanged + --prefix, -p Prefix generated UUIDs + --suffix Suffix generated UUIDs + --help, -h show help + --version, -v print the version + ``` + +## Example + +Replace all UUIDs and cache the correspondance. + +```command +$ anonuuid git:(master) ✗ cat < 32 { + part = part[:32] + } + uuid := part[:8] + "-" + part[8:12] + "-1" + part[13:16] + "-" + part[16:20] + "-" + part[20:32] + + err := IsUUID(uuid) + if err != nil { + return "", err + } + + return uuid, nil +} + +// GenerateRandomUUID returns an UUID based on random strings +func GenerateRandomUUID(length int) (string, error) { + var letters = []rune("abcdef0123456789") + + b := make([]rune, length) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return FormatUUID(string(b)) +} + +// GenerateHexspeakUUID returns an UUID formatted string containing hexspeak words +func GenerateHexspeakUUID(i int) (string, error) { + if i < 0 { + i = -i + } + hexspeaks := []string{ + "0ff1ce", + "31337", + "4b1d", + "badc0de", + "badcafe", + "badf00d", + "deadbabe", + "deadbeef", + "deadc0de", + "deadfeed", + "fee1bad", + } + return FormatUUID(hexspeaks[i%len(hexspeaks)]) +} + +// GenerateLenUUID returns an UUID formatted string based on an index number +func GenerateLenUUID(i int) (string, error) { + if i < 0 { + i = 2<<29 + i + } + return FormatUUID(fmt.Sprintf("%x", i)) +} diff --git a/vendor/github.com/moul/gotty-client/LICENSE b/vendor/github.com/moul/gotty-client/LICENSE new file mode 100644 index 000000000..492e2c629 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Manfred Touron + +Permission is 
hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/moul/gotty-client/Makefile b/vendor/github.com/moul/gotty-client/Makefile new file mode 100644 index 000000000..4952ef990 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/Makefile @@ -0,0 +1,81 @@ +# Project-specific variables +BINARIES ?= gotty-client +GOTTY_URL := http://localhost:8081/ +VERSION := $(shell cat .goxc.json | jq -c .PackageVersion | sed 's/"//g') + +CONVEY_PORT ?= 9042 + + +# Common variables +SOURCES := $(shell find . -type f -name "*.go") +COMMANDS := $(shell go list ./... | grep -v /vendor/ | grep /cmd/) +PACKAGES := $(shell go list ./... | grep -v /vendor/ | grep -v /cmd/) +GOENV ?= GO15VENDOREXPERIMENT=1 +GO ?= $(GOENV) go +GODEP ?= $(GOENV) godep +USER ?= $(shell whoami) + + +all: build + + +.PHONY: build +build: $(BINARIES) + + +.PHONY: install +install: + $(GO) install ./cmd/gotty-client + + +$(BINARIES): $(SOURCES) + $(GO) build -o $@ ./cmd/$@ + + +.PHONY: test +test: + $(GO) get -t . 
+ $(GO) test -v . + + +.PHONY: godep-save +godep-save: + $(GODEP) save $(PACKAGES) $(COMMANDS) + + +.PHONY: clean +clean: + rm -f $(BINARIES) + + +.PHONY: re +re: clean all + + +.PHONY: convey +convey: + $(GO) get github.com/smartystreets/goconvey + goconvey -cover -port=$(CONVEY_PORT) -workDir="$(realpath .)" -depth=1 + + +.PHONY: cover +cover: profile.out + + +profile.out: $(SOURCES) + rm -f $@ + $(GO) test -covermode=count -coverpkg=. -coverprofile=$@ . + + +.PHONY: docker-build +docker-build: + go get github.com/laher/goxc + rm -rf contrib/docker/linux_386 + for binary in $(BINARIES); do \ + goxc -bc="linux,386" -d . -pv contrib/docker -n $$binary xc; \ + mv contrib/docker/linux_386/$$binary contrib/docker/entrypoint; \ + docker build -t $(USER)/$$binary contrib/docker; \ + docker run -it --rm $(USER)/$$binary || true; \ + docker inspect --type=image --format="{{ .Id }}" moul/$$binary || true; \ + echo "Now you can run 'docker push $(USER)/$$binary'"; \ + done diff --git a/vendor/github.com/moul/gotty-client/README.md b/vendor/github.com/moul/gotty-client/README.md new file mode 100644 index 000000000..2fb562b32 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/README.md @@ -0,0 +1,201 @@ +# gotty-client +:wrench: Terminal client for [GoTTY](https://github.com/yudai/gotty). 
+ +![](https://raw.githubusercontent.com/moul/gotty-client/master/resources/gotty-client.png) + +[![Build Status](https://travis-ci.org/moul/gotty-client.svg?branch=master)](https://travis-ci.org/moul/gotty-client) +[![GoDoc](https://godoc.org/github.com/moul/gotty-client?status.svg)](https://godoc.org/github.com/moul/gotty-client) + +```ruby + ┌─────────────────┐ + ┌──────▶│ /bin/bash │ + │ └─────────────────┘ + ┌──────────────┐ ┌──────────┐ + │ │ │ Gotty │ +┌───────┐ ┌──▶│ Browser │───────┐ │ │ +│ │ │ │ │ │ │ │ +│ │ │ └──────────────┘ │ │ │ ┌─────────────────┐ +│ Bob │───┤ websockets─▶│ │─▶│ emacs /var/www │ +│ │ │ ╔═ ══ ══ ══ ══ ╗ │ │ │ └─────────────────┘ +│ │ │ ║ ║ │ │ │ +└───────┘ └──▶║ gotty-client ───────┘ │ │ + ║ │ │ + ╚═ ══ ══ ══ ══ ╝ └──────────┘ + │ ┌─────────────────┐ + └──────▶│ tmux attach │ + └─────────────────┘ +``` + +## Example + +Server side ([GoTTY](https://github.com/yudai/gotty)) + +```console +$ gotty -p 9191 sh -c 'while true; do date; sleep 1; done' +2015/08/24 18:54:31 Server is starting with command: sh -c while true; do date; sleep 1; done +2015/08/24 18:54:31 URL: http://[::1]:9191/ +2015/08/24 18:54:34 GET /ws +2015/08/24 18:54:34 New client connected: 127.0.0.1:61811 +2015/08/24 18:54:34 Command is running for client 127.0.0.1:61811 with PID 64834 +2015/08/24 18:54:39 Command exited for: 127.0.0.1:61811 +2015/08/24 18:54:39 Connection closed: 127.0.0.1:61811 +... 
+``` + +**Client side** + +```console +$ gotty-client http://localhost:9191/ +INFO[0000] New title: GoTTY - sh -c while true; do date; sleep 1; done (jean-michel-van-damme.local) +WARN[0000] Unhandled protocol message: json pref: 2{} +Mon Aug 24 18:54:34 CEST 2015 +Mon Aug 24 18:54:35 CEST 2015 +Mon Aug 24 18:54:36 CEST 2015 +Mon Aug 24 18:54:37 CEST 2015 +Mon Aug 24 18:54:38 CEST 2015 +^C +``` + +## Usage + +```console +$ gotty-client -h +NAME: + gotty-client - GoTTY client for your terminal + +USAGE: + gotty-client [global options] command [command options] GOTTY_URL + +VERSION: + 1.3.0+ + +AUTHOR(S): + Manfred Touron + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --debug, -D Enable debug mode [$GOTTY_CLIENT_DEBUG] + --help, -h show help + --version, -v print the version +``` + +## Install + +Install latest version using Golang (recommended) + +```console +$ go get github.com/moul/gotty-client/cmd/gotty-client +``` + +--- + +Install latest version using Homebrew (Mac OS X) + +```console +$ brew install https://raw.githubusercontent.com/moul/gotty-client/master/contrib/homebrew/gotty-client.rb --HEAD +``` + +or the latest released version + +```console +$ brew install https://raw.githubusercontent.com/moul/gotty-client/master/contrib/homebrew/gotty-client.rb +``` + +## Changelog + +### master (unreleased) + +* No entry + +[full commits list](https://github.com/moul/gotty-client/compare/v1.6.1...master) + +### [v1.6.1](https://github.com/moul/gotty-client/releases/tag/v1.6.1) (2017-01-19) + +* Do not exit on EOF ([#45](https://github.com/moul/gotty-client/pull/45)) ([@gurjeet](https://github.com/gurjeet)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.6.0...v1.6.1) + +### [v1.6.0](https://github.com/moul/gotty-client/releases/tag/v1.6.0) (2016-05-23) + +* Support of `--use-proxy-from-env` (Add Proxy support) ([#36](https://github.com/moul/gotty-client/pull/36)) 
([@byung2](https://github.com/byung2)) +* Add debug mode ([#18](https://github.com/moul/gotty-client/issues/18)) +* Fix argument passing ([#16](https://github.com/moul/gotty-client/issues/16)) +* Remove warnings + golang fixes and refactoring ([@QuentinPerez](https://github.com/QuentinPerez)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.5.0...v1.6.0) + +### [v1.5.0](https://github.com/moul/gotty-client/releases/tag/v1.5.0) (2016-02-18) + +* Add autocomplete support ([#19](https://github.com/moul/gotty-client/issues/19)) +* Switch from `Party` to `Godep` +* Fix terminal data being interpreted as format string ([#34](https://github.com/moul/gotty-client/pull/34)) ([@mickael9](https://github.com/mickael9)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.4.0...v1.5.0) + +### [v1.4.0](https://github.com/moul/gotty-client/releases/tag/v1.4.0) (2015-12-09) + +* Remove solaris,plan9,nacl for `.goxc.json` +* Add an error if the go version is lower than 1.5 +* Flexible parsing of the input URL +* Add tests +* Support of `--skip-tls-verify` + +[full commits list](https://github.com/moul/gotty-client/compare/v1.3.0...v1.4.0) + +### [v1.3.0](https://github.com/moul/gotty-client/releases/tag/v1.3.0) (2015-10-27) + +* Fix `connected` state when using `Connect()` + `Loop()` methods +* Add `ExitLoop` which allow to exit from `Loop` function + +[full commits list](https://github.com/moul/gotty-client/compare/v1.2.0...v1.3.0) + +### [v1.2.0](https://github.com/moul/gotty-client/releases/tag/v1.2.0) (2015-10-23) + +* Removed an annoying warning when exiting connection ([#22](https://github.com/moul/gotty-client/issues/22)) ([@QuentinPerez](https://github.com/QuentinPerez)) +* Add the ability to configure alternative stdout ([#21](https://github.com/moul/gotty-client/issues/21)) ([@QuentinPerez](https://github.com/QuentinPerez)) +* Refactored the goroutine system with select, improve speed and stability 
([@QuentinPerez](https://github.com/QuentinPerez)) +* Add debug mode (`--debug`/`-D`) ([#18](https://github.com/moul/gotty-client/issues/18)) +* Improve error message when connecting by checking HTTP status code +* Fix arguments passing ([#16](https://github.com/moul/gotty-client/issues/16)) +* Dropped support for golang<1.5 +* Small fixes + +[full commits list](https://github.com/moul/gotty-client/compare/v1.1.0...v1.2.0) + +### [v1.1.0](https://github.com/moul/gotty-client/releases/tag/v1.1.0) (2015-10-10) + +* Handling arguments + using mutexes (thanks to [@QuentinPerez](https://github.com/QuentinPerez)) +* Add logo ([#9](https://github.com/moul/gotty-client/issues/9)) +* Using codegansta/cli for CLI parsing ([#3](https://github.com/moul/gotty-client/issues/3)) +* Fix panic when running on older GoTTY server ([#13](https://github.com/moul/gotty-client/issues/13)) +* Add 'homebrew support' ([#1](https://github.com/moul/gotty-client/issues/1)) +* Add Changelog ([#5](https://github.com/moul/gotty-client/issues/5)) +* Add GOXC configuration to build binaries for multiple architectures ([#2](https://github.com/moul/gotty-client/issues/2)) + +[full commits list](https://github.com/moul/gotty-client/compare/v1.0.1...v1.1.0) + +### [v1.0.1](https://github.com/moul/gotty-client/releases/tag/v1.0.1) (2015-09-27) + +* Using party to manage dependencies + +[full commits list](https://github.com/moul/gotty-client/compare/v1.0.0...v1.0.1) + +### [v1.0.0](https://github.com/moul/gotty-client/releases/tag/v1.0.0) (2015-09-27) + +Compatible with [GoTTY](https://github.com/yudai/gotty) version: [v0.0.10](https://github.com/yudai/gotty/releases/tag/v0.0.10) + +#### Features + +* Support **basic-auth** +* Support **terminal-(re)size** +* Support **write** +* Support **title** +* Support **custom URI** + +[full commits list](https://github.com/moul/gotty-client/compare/cf0c1146c7ce20fe0bd65764c13253bc575cd43a...v1.0.0) + +## License + +MIT diff --git 
a/vendor/github.com/moul/gotty-client/arch.go b/vendor/github.com/moul/gotty-client/arch.go new file mode 100644 index 000000000..d856fbd2c --- /dev/null +++ b/vendor/github.com/moul/gotty-client/arch.go @@ -0,0 +1,34 @@ +// +build !windows + +package gottyclient + +import ( + "encoding/json" + "fmt" + "os" + "os/signal" + "syscall" + "unsafe" +) + +func notifySignalSIGWINCH(c chan<- os.Signal) { + signal.Notify(c, syscall.SIGWINCH) +} + +func resetSignalSIGWINCH() { + signal.Reset(syscall.SIGWINCH) +} + +func syscallTIOCGWINSZ() ([]byte, error) { + ws := winsize{} + + syscall.Syscall(syscall.SYS_IOCTL, + uintptr(0), uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(&ws))) + + b, err := json.Marshal(ws) + if err != nil { + return nil, fmt.Errorf("json.Marshal error: %v", err) + } + return b, err +} diff --git a/vendor/github.com/moul/gotty-client/arch_windows.go b/vendor/github.com/moul/gotty-client/arch_windows.go new file mode 100644 index 000000000..53aaffc5f --- /dev/null +++ b/vendor/github.com/moul/gotty-client/arch_windows.go @@ -0,0 +1,16 @@ +package gottyclient + +import ( + "errors" + "os" +) + +func notifySignalSIGWINCH(c chan<- os.Signal) { +} + +func resetSignalSIGWINCH() { +} + +func syscallTIOCGWINSZ() ([]byte, error) { + return nil, errors.New("SIGWINCH isn't supported on this ARCH") +} diff --git a/vendor/github.com/moul/gotty-client/glide.lock b/vendor/github.com/moul/gotty-client/glide.lock new file mode 100644 index 000000000..61d5aeee3 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/glide.lock @@ -0,0 +1,37 @@ +hash: 5ba4ef945563e8e85097f2699126b1577f9c667fdbbcdd71604e553ab7dd2a03 +updated: 2017-02-04T23:03:32.03375088+01:00 +imports: +- name: github.com/codegangsta/cli + version: 2ae9042f5bcbaf15b01229f8395ba8e72e01bded +- name: github.com/creack/goselect + version: 40085cf5fd629ccd88dc328895f1f137d09a1de4 +- name: github.com/gopherjs/gopherjs + version: db27c7c470d7404b6b201f82d6fd4821260bd13e + subpackages: + - js +- name: 
github.com/gorilla/websocket + version: 1f512fc3f05332ba7117626cdfb4e07474e58e60 +- name: github.com/jtolds/gls + version: 8ddce2a84170772b95dd5d576c48d517b22cac63 +- name: github.com/Sirupsen/logrus + version: cd7d1bbe41066b6c1f19780f895901052150a575 +- name: github.com/smartystreets/assertions + version: 40711f7748186bbf9c99977cd89f21ce1a229447 + subpackages: + - internal/go-render/render + - internal/oglematchers +- name: github.com/smartystreets/goconvey + version: d4c757aa9afd1e2fc1832aaab209b5794eb336e1 + subpackages: + - convey + - convey/gotest + - convey/reporting +- name: golang.org/x/crypto + version: 5bcd134fee4dd1475da17714aac19c0aa0142e2f + subpackages: + - ssh/terminal +- name: golang.org/x/sys + version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 + subpackages: + - unix +testImports: [] diff --git a/vendor/github.com/moul/gotty-client/glide.yaml b/vendor/github.com/moul/gotty-client/glide.yaml new file mode 100644 index 000000000..287efb9d8 --- /dev/null +++ b/vendor/github.com/moul/gotty-client/glide.yaml @@ -0,0 +1,35 @@ +package: github.com/moul/gotty-client +import: +- package: github.com/Sirupsen/logrus + version: cd7d1bbe41066b6c1f19780f895901052150a575 +- package: github.com/codegangsta/cli + version: 2ae9042f5bcbaf15b01229f8395ba8e72e01bded +- package: github.com/creack/goselect + version: 40085cf5fd629ccd88dc328895f1f137d09a1de4 +- package: github.com/gopherjs/gopherjs + version: db27c7c470d7404b6b201f82d6fd4821260bd13e + subpackages: + - js +- package: github.com/gorilla/websocket + version: 1f512fc3f05332ba7117626cdfb4e07474e58e60 +- package: github.com/jtolds/gls + version: 8ddce2a84170772b95dd5d576c48d517b22cac63 +- package: github.com/smartystreets/assertions + version: 40711f7748186bbf9c99977cd89f21ce1a229447 + subpackages: + - internal/go-render/render + - internal/oglematchers +- package: github.com/smartystreets/goconvey + version: d4c757aa9afd1e2fc1832aaab209b5794eb336e1 + subpackages: + - convey + - convey/gotest + - 
convey/reporting +- package: golang.org/x/crypto + version: 5bcd134fee4dd1475da17714aac19c0aa0142e2f + subpackages: + - ssh/terminal +- package: golang.org/x/sys + version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 + subpackages: + - unix diff --git a/vendor/github.com/moul/gotty-client/gotty-client.go b/vendor/github.com/moul/gotty-client/gotty-client.go new file mode 100644 index 000000000..35e599d9b --- /dev/null +++ b/vendor/github.com/moul/gotty-client/gotty-client.go @@ -0,0 +1,469 @@ +package gottyclient + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/creack/goselect" + "github.com/gorilla/websocket" + "golang.org/x/crypto/ssh/terminal" +) + +// GetAuthTokenURL transforms a GoTTY http URL to its AuthToken file URL +func GetAuthTokenURL(httpURL string) (*url.URL, *http.Header, error) { + header := http.Header{} + target, err := url.Parse(httpURL) + if err != nil { + return nil, nil, err + } + + target.Path = strings.TrimLeft(target.Path+"auth_token.js", "/") + + if target.User != nil { + header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(target.User.String()))) + target.User = nil + } + + return target, &header, nil +} + +// GetURLQuery returns url.query +func GetURLQuery(rawurl string) (url.Values, error) { + target, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + return target.Query(), nil +} + +// GetWebsocketURL transforms a GoTTY http URL to its WebSocket URL +func GetWebsocketURL(httpURL string) (*url.URL, *http.Header, error) { + header := http.Header{} + target, err := url.Parse(httpURL) + if err != nil { + return nil, nil, err + } + + if target.Scheme == "https" { + target.Scheme = "wss" + } else { + target.Scheme = "ws" + } + + target.Path = strings.TrimLeft(target.Path+"ws", "/") + + if target.User != nil { + header.Add("Authorization", 
"Basic "+base64.StdEncoding.EncodeToString([]byte(target.User.String()))) + target.User = nil + } + + return target, &header, nil +} + +type Client struct { + Dialer *websocket.Dialer + Conn *websocket.Conn + URL string + WriteMutex *sync.Mutex + Output io.Writer + poison chan bool + SkipTLSVerify bool + UseProxyFromEnv bool + Connected bool +} + +type querySingleType struct { + AuthToken string `json:"AuthToken"` + Arguments string `json:"Arguments"` +} + +func (c *Client) write(data []byte) error { + c.WriteMutex.Lock() + defer c.WriteMutex.Unlock() + return c.Conn.WriteMessage(websocket.TextMessage, data) +} + +// GetAuthToken retrieves an Auth Token from dynamic auth_token.js file +func (c *Client) GetAuthToken() (string, error) { + target, header, err := GetAuthTokenURL(c.URL) + if err != nil { + return "", err + } + + logrus.Debugf("Fetching auth token auth-token: %q", target.String()) + req, err := http.NewRequest("GET", target.String(), nil) + req.Header = *header + tr := &http.Transport{} + if c.SkipTLSVerify { + conf := &tls.Config{InsecureSkipVerify: true} + tr.TLSClientConfig = conf + } + if c.UseProxyFromEnv { + tr.Proxy = http.ProxyFromEnvironment + } + client := &http.Client{Transport: tr} + resp, err := client.Do(req) + if err != nil { + return "", err + } + + switch resp.StatusCode { + case 200: + // Everything is OK + default: + return "", fmt.Errorf("unknown status code: %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + re := regexp.MustCompile("var gotty_auth_token = '(.*)'") + output := re.FindStringSubmatch(string(body)) + if len(output) == 0 { + return "", fmt.Errorf("Cannot fetch GoTTY auth-token, please upgrade your GoTTY server.") + } + + return output[1], nil +} + +// Connect tries to dial a websocket server +func (c *Client) Connect() error { + // Retrieve AuthToken + authToken, err := c.GetAuthToken() + if err 
!= nil { + return err + } + logrus.Debugf("Auth-token: %q", authToken) + + // Open WebSocket connection + target, header, err := GetWebsocketURL(c.URL) + if err != nil { + return err + } + logrus.Debugf("Connecting to websocket: %q", target.String()) + if c.SkipTLSVerify { + c.Dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } + if c.UseProxyFromEnv { + c.Dialer.Proxy = http.ProxyFromEnvironment + } + conn, _, err := c.Dialer.Dial(target.String(), *header) + if err != nil { + return err + } + c.Conn = conn + c.Connected = true + + // Pass arguments and auth-token + query, err := GetURLQuery(c.URL) + if err != nil { + return err + } + querySingle := querySingleType{ + Arguments: "?" + query.Encode(), + AuthToken: authToken, + } + json, err := json.Marshal(querySingle) + if err != nil { + logrus.Errorf("Failed to parse init message %v", err) + return err + } + // Send Json + logrus.Debugf("Sending arguments and auth-token") + err = c.write(json) + if err != nil { + return err + } + + go c.pingLoop() + + return nil +} + +func (c *Client) pingLoop() { + for { + logrus.Debugf("Sending ping") + c.write([]byte("1")) + time.Sleep(30 * time.Second) + } +} + +// Close will nicely close the dialer +func (c *Client) Close() { + c.Conn.Close() +} + +// ExitLoop will kill all goroutines launched by c.Loop() +// ExitLoop() -> wait Loop() -> Close() +func (c *Client) ExitLoop() { + fname := "ExitLoop" + openPoison(fname, c.poison) +} + +// Loop will look indefinitely for new messages +func (c *Client) Loop() error { + if !c.Connected { + err := c.Connect() + if err != nil { + return err + } + } + + wg := &sync.WaitGroup{} + + wg.Add(1) + go c.termsizeLoop(wg) + + wg.Add(1) + go c.readLoop(wg) + + wg.Add(1) + go c.writeLoop(wg) + + /* Wait for all of the above goroutines to finish */ + wg.Wait() + + logrus.Debug("Client.Loop() exiting") + return nil +} + +type winsize struct { + Rows uint16 `json:"rows"` + Columns uint16 `json:"columns"` + // unused + x uint16 + y 
uint16 +} + +type posionReason int + +const ( + committedSuicide = iota + killed +) + +func openPoison(fname string, poison chan bool) posionReason { + logrus.Debug(fname + " suicide") + + /* + * The close() may raise panic if multiple goroutines commit suicide at the + * same time. Prevent that panic from bubbling up. + */ + defer func() { + if r := recover(); r != nil { + logrus.Debug("Prevented panic() of simultaneous suicides", r) + } + }() + + /* Signal others to die */ + close(poison) + return committedSuicide +} + +func die(fname string, poison chan bool) posionReason { + logrus.Debug(fname + " died") + + wasOpen := <-poison + if wasOpen { + logrus.Error("ERROR: The channel was open when it wasn't suppoed to be") + } + + return killed +} + +func (c *Client) termsizeLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "termsizeLoop" + + ch := make(chan os.Signal, 1) + notifySignalSIGWINCH(ch) + defer resetSignalSIGWINCH() + + for { + if b, err := syscallTIOCGWINSZ(); err != nil { + logrus.Warn(err) + } else { + if err = c.write(append([]byte("2"), b...)); err != nil { + logrus.Warnf("ws.WriteMessage failed: %v", err) + } + } + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + case <-ch: + } + } +} + +type exposeFd interface { + Fd() uintptr +} + +func (c *Client) writeLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "writeLoop" + + buff := make([]byte, 128) + oldState, err := terminal.MakeRaw(0) + if err == nil { + defer terminal.Restore(0, oldState) + } + + rdfs := &goselect.FDSet{} + reader := io.Reader(os.Stdin) + for { + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + default: + } + + rdfs.Zero() + rdfs.Set(reader.(exposeFd).Fd()) + err := goselect.Select(1, rdfs, nil, nil, 50*time.Millisecond) + if err != nil { + return openPoison(fname, c.poison) + } + if rdfs.IsSet(reader.(exposeFd).Fd()) { + size, err := 
reader.Read(buff) + + if err != nil { + if err == io.EOF { + // Send EOF to GoTTY + + // Send 'Input' marker, as defined in GoTTY::client_context.go, + // followed by EOT (a translation of Ctrl-D for terminals) + err = c.write(append([]byte("0"), byte(4))) + + if err != nil { + return openPoison(fname, c.poison) + } + continue + } else { + return openPoison(fname, c.poison) + } + } + + if size <= 0 { + continue + } + + data := buff[:size] + err = c.write(append([]byte("0"), data...)) + if err != nil { + return openPoison(fname, c.poison) + } + } + } +} + +func (c *Client) readLoop(wg *sync.WaitGroup) posionReason { + + defer wg.Done() + fname := "readLoop" + + type MessageNonBlocking struct { + Data []byte + Err error + } + msgChan := make(chan MessageNonBlocking) + + for { + go func() { + _, data, err := c.Conn.ReadMessage() + msgChan <- MessageNonBlocking{Data: data, Err: err} + }() + + select { + case <-c.poison: + /* Somebody poisoned the well; die */ + return die(fname, c.poison) + case msg := <-msgChan: + if msg.Err != nil { + + if _, ok := msg.Err.(*websocket.CloseError); !ok { + logrus.Warnf("c.Conn.ReadMessage: %v", msg.Err) + } + return openPoison(fname, c.poison) + } + if len(msg.Data) == 0 { + + logrus.Warnf("An error has occured") + return openPoison(fname, c.poison) + } + switch msg.Data[0] { + case '0': // data + buf, err := base64.StdEncoding.DecodeString(string(msg.Data[1:])) + if err != nil { + logrus.Warnf("Invalid base64 content: %q", msg.Data[1:]) + break + } + c.Output.Write(buf) + case '1': // pong + case '2': // new title + newTitle := string(msg.Data[1:]) + fmt.Fprintf(c.Output, "\033]0;%s\007", newTitle) + case '3': // json prefs + logrus.Debugf("Unhandled protocol message: json pref: %s", string(msg.Data[1:])) + case '4': // autoreconnect + logrus.Debugf("Unhandled protocol message: autoreconnect: %s", string(msg.Data)) + default: + logrus.Warnf("Unhandled protocol message: %s", string(msg.Data)) + } + } + } +} + +// SetOutput changes the 
output stream +func (c *Client) SetOutput(w io.Writer) { + c.Output = w +} + +// ParseURL parses an URL which may be incomplete and tries to standardize it +func ParseURL(input string) (string, error) { + parsed, err := url.Parse(input) + if err != nil { + return "", err + } + switch parsed.Scheme { + case "http", "https": + // everything is ok + default: + return ParseURL(fmt.Sprintf("http://%s", input)) + } + return parsed.String(), nil +} + +// NewClient returns a GoTTY client object +func NewClient(inputURL string) (*Client, error) { + url, err := ParseURL(inputURL) + if err != nil { + return nil, err + } + return &Client{ + Dialer: &websocket.Dialer{}, + URL: url, + WriteMutex: &sync.Mutex{}, + Output: os.Stdout, + poison: make(chan bool), + }, nil +} diff --git a/vendor/github.com/renstrom/fuzzysearch/LICENSE b/vendor/github.com/renstrom/fuzzysearch/LICENSE new file mode 100644 index 000000000..9cc753370 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Renström + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go new file mode 100644 index 000000000..63277d51e --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go @@ -0,0 +1,167 @@ +// Fuzzy searching allows for flexibly matching a string with partial input, +// useful for filtering data very quickly based on lightweight user input. +package fuzzy + +import ( + "unicode" + "unicode/utf8" +) + +var noop = func(r rune) rune { return r } + +// Match returns true if source matches target using a fuzzy-searching +// algorithm. Note that it doesn't implement Levenshtein distance (see +// RankMatch instead), but rather a simplified version where there's no +// approximation. The method will return true only if each character in the +// source can be found in the target and occurs after the preceding matches. +func Match(source, target string) bool { + return match(source, target, noop) +} + +// MatchFold is a case-insensitive version of Match. +func MatchFold(source, target string) bool { + return match(source, target, unicode.ToLower) +} + +func match(source, target string, fn func(rune) rune) bool { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return false + } + + if lenDiff == 0 && source == target { + return true + } + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } + } + return false + } + + return true +} + +// Find will return a list of strings in targets that fuzzy matches source. 
+func Find(source string, targets []string) []string { + return find(source, targets, noop) +} + +// FindFold is a case-insensitive version of Find. +func FindFold(source string, targets []string) []string { + return find(source, targets, unicode.ToLower) +} + +func find(source string, targets []string, fn func(rune) rune) []string { + var matches []string + + for _, target := range targets { + if match(source, target, fn) { + matches = append(matches, target) + } + } + + return matches +} + +// RankMatch is similar to Match except it will measure the Levenshtein +// distance between the source and the target and return its result. If there +// was no match, it will return -1. +// Given the requirements of match, RankMatch only needs to perform a subset of +// the Levenshtein calculation, only deletions need be considered, required +// additions and substitutions would fail the match test. +func RankMatch(source, target string) int { + return rank(source, target, noop) +} + +// RankMatchFold is a case-insensitive version of RankMatch. +func RankMatchFold(source, target string) int { + return rank(source, target, unicode.ToLower) +} + +func rank(source, target string, fn func(rune) rune) int { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return -1 + } + + if lenDiff == 0 && source == target { + return 0 + } + + runeDiff := 0 + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } else { + runeDiff++ + } + } + return -1 + } + + // Count up remaining char + for len(target) > 0 { + target = target[utf8.RuneLen(rune(target[0])):] + runeDiff++ + } + + return runeDiff +} + +// RankFind is similar to Find, except it will also rank all matches using +// Levenshtein distance. 
+func RankFind(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, noop) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +// RankFindFold is a case-insensitive version of RankFind. +func RankFindFold(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, unicode.ToLower) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +type Rank struct { + // Source is used as the source for matching. + Source string + + // Target is the word matched against. + Target string + + // Distance is the Levenshtein distance between Source and Target. + Distance int +} + +type Ranks []Rank + +func (r Ranks) Len() int { + return len(r) +} + +func (r Ranks) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r Ranks) Less(i, j int) bool { + return r[i].Distance < r[j].Distance +} diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go new file mode 100644 index 000000000..237923d34 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go @@ -0,0 +1,43 @@ +package fuzzy + +// LevenshteinDistance measures the difference between two strings. +// The Levenshtein distance between two words is the minimum number of +// single-character edits (i.e. insertions, deletions or substitutions) +// required to change one word into the other. 
+// +// This implementation is optimized to use O(min(m,n)) space and is based on the +// optimized C version found here: +// http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#C +func LevenshteinDistance(s, t string) int { + r1, r2 := []rune(s), []rune(t) + column := make([]int, len(r1)+1) + + for y := 1; y <= len(r1); y++ { + column[y] = y + } + + for x := 1; x <= len(r2); x++ { + column[0] = x + + for y, lastDiag := 1, x-1; y <= len(r1); y++ { + oldDiag := column[y] + cost := 0 + if r1[y-1] != r2[x-1] { + cost = 1 + } + column[y] = min(column[y]+1, column[y-1]+1, lastDiag+cost) + lastDiag = oldDiag + } + } + + return column[len(r1)] +} + +func min(a, b, c int) int { + if a < b && a < c { + return a + } else if b < c { + return b + } + return c +} diff --git a/vendor/github.com/scaleway/scaleway-cli/LICENSE.md b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md new file mode 100644 index 000000000..7503a16ca --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License +=============== + +Copyright (c) **2014-2016 Scaleway ([@scaleway](https://twitter.com/scaleway))** + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md new file mode 100644 index 000000000..559a7018d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md @@ -0,0 +1,25 @@ +# Scaleway's API + +[![GoDoc](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api?status.svg)](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api) + +This package contains facilities to play with the Scaleway API, it includes the following features: + +- dedicated configuration file containing credentials to deal with the API +- caching to resolve UUIDs without contacting the API + +## Links + +- [API documentation](https://developer.scaleway.com) +- [Official Python SDK](https://github.com/scaleway/python-scaleway) +- Projects using this SDK + - https://github.com/scaleway/devhub + - https://github.com/scaleway/docker-machine-driver-scaleway + - https://github.com/scaleway-community/scaleway-ubuntu-coreos/blob/master/overlay/usr/local/update-firewall/scw-api/cache.go + - https://github.com/pulcy/quark + - https://github.com/hex-sh/terraform-provider-scaleway + - https://github.com/tscolari/bosh-scaleway-cpi +- Other **golang** clients + - https://github.com/lalyos/onlabs + - https://github.com/meatballhat/packer-builder-onlinelabs + - https://github.com/nlamirault/go-scaleway + - https://github.com/golang/build/blob/master/cmd/scaleway/scaleway.go diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go new file mode 100644 index 000000000..5ce0157dc --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go @@ -0,0 +1,2754 @@ +// 
Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +// Interact with Scaleway API + +// Package api contains client and functions to interact with Scaleway API +package api + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "golang.org/x/sync/errgroup" +) + +// Default values +var ( + AccountAPI = "https://account.scaleway.com/" + MetadataAPI = "http://169.254.42.42/" + MarketplaceAPI = "https://api-marketplace.scaleway.com" + ComputeAPIPar1 = "https://cp-par1.scaleway.com/" + ComputeAPIAms1 = "https://cp-ams1.scaleway.com" + + URLPublicDNS = ".pub.cloud.scaleway.com" + URLPrivateDNS = ".priv.cloud.scaleway.com" +) + +func init() { + if url := os.Getenv("SCW_ACCOUNT_API"); url != "" { + AccountAPI = url + } + if url := os.Getenv("SCW_METADATA_API"); url != "" { + MetadataAPI = url + } + if url := os.Getenv("SCW_MARKETPLACE_API"); url != "" { + MarketplaceAPI = url + } +} + +const ( + perPage = 50 +) + +// ScalewayAPI is the interface used to communicate with the Scaleway API +type ScalewayAPI struct { + // Organization is the identifier of the Scaleway organization + Organization string + + // Token is the authentication token for the Scaleway organization + Token string + + // Password is the authentication password + password string + + userAgent string + + // Cache is used to quickly resolve identifiers from names + Cache *ScalewayCache + + client *http.Client + verbose bool + computeAPI string + + Region string + // + Logger +} + +// ScalewayAPIError represents a Scaleway API Error +type ScalewayAPIError struct { + // Message is a human-friendly error message + APIMessage string `json:"message,omitempty"` + + // Type is a string code that defines the kind of error + 
Type string `json:"type,omitempty"` + + // Fields contains detail about validation error + Fields map[string][]string `json:"fields,omitempty"` + + // StatusCode is the HTTP status code received + StatusCode int `json:"-"` + + // Message + Message string `json:"-"` +} + +// Error returns a string representing the error +func (e ScalewayAPIError) Error() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "StatusCode: %v, ", e.StatusCode) + fmt.Fprintf(&b, "Type: %v, ", e.Type) + fmt.Fprintf(&b, "APIMessage: \x1b[31m%v\x1b[0m", e.APIMessage) + if len(e.Fields) > 0 { + fmt.Fprintf(&b, ", Details: %v", e.Fields) + } + return b.String() +} + +// HideAPICredentials removes API credentials from a string +func (s *ScalewayAPI) HideAPICredentials(input string) string { + output := input + if s.Token != "" { + output = strings.Replace(output, s.Token, "00000000-0000-4000-8000-000000000000", -1) + } + if s.Organization != "" { + output = strings.Replace(output, s.Organization, "00000000-0000-5000-9000-000000000000", -1) + } + if s.password != "" { + output = strings.Replace(output, s.password, "XX-XX-XX-XX", -1) + } + return output +} + +// ScalewayIPAddress represents a Scaleway IP address +type ScalewayIPAddress struct { + // Identifier is a unique identifier for the IP address + Identifier string `json:"id,omitempty"` + + // IP is an IPv4 address + IP string `json:"address,omitempty"` + + // Dynamic is a flag that defines an IP that change on each reboot + Dynamic *bool `json:"dynamic,omitempty"` +} + +// ScalewayVolume represents a Scaleway Volume +type ScalewayVolume struct { + // Identifier is a unique identifier for the volume + Identifier string `json:"id,omitempty"` + + // Size is the allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // CreationDate is the creation date of the volume + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the volume + ModificationDate string 
`json:"modification_date,omitempty"` + + // Organization is the organization owning the volume + Organization string `json:"organization,omitempty"` + + // Name is the name of the volume + Name string `json:"name,omitempty"` + + // Server is the server using this image + Server *struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server,omitempty"` + + // VolumeType is a Scaleway identifier for the kind of volume (default: l_ssd) + VolumeType string `json:"volume_type,omitempty"` + + // ExportURI represents the url used by initrd/scripts to attach the volume + ExportURI string `json:"export_uri,omitempty"` +} + +// ScalewayOneVolume represents the response of a GET /volumes/UUID API call +type ScalewayOneVolume struct { + Volume ScalewayVolume `json:"volume,omitempty"` +} + +// ScalewayVolumes represents a group of Scaleway volumes +type ScalewayVolumes struct { + // Volumes holds scaleway volumes of the response + Volumes []ScalewayVolume `json:"volumes,omitempty"` +} + +// ScalewayVolumeDefinition represents a Scaleway volume definition +type ScalewayVolumeDefinition struct { + // Name is the user-defined name of the volume + Name string `json:"name"` + + // Image is the image used by the volume + Size uint64 `json:"size"` + + // Bootscript is the bootscript used by the volume + Type string `json:"volume_type"` + + // Organization is the owner of the volume + Organization string `json:"organization"` +} + +// ScalewayVolumePutDefinition represents a Scaleway volume with nullable fields (for PUT) +type ScalewayVolumePutDefinition struct { + Identifier *string `json:"id,omitempty"` + Size *uint64 `json:"size,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Organization *string `json:"organization,omitempty"` + Name *string `json:"name,omitempty"` + Server struct { + Identifier *string `json:"id,omitempty"` + Name *string 
`json:"name,omitempty"` + } `json:"server,omitempty"` + VolumeType *string `json:"volume_type,omitempty"` + ExportURI *string `json:"export_uri,omitempty"` +} + +// ScalewayImage represents a Scaleway Image +type ScalewayImage struct { + // Identifier is a unique identifier for the image + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the image + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the image + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the image + ModificationDate string `json:"modification_date,omitempty"` + + // RootVolume is the root volume bound to the image + RootVolume ScalewayVolume `json:"root_volume,omitempty"` + + // Public is true for public images and false for user images + Public bool `json:"public,omitempty"` + + // Bootscript is the bootscript bound to the image + DefaultBootscript *ScalewayBootscript `json:"default_bootscript,omitempty"` + + // Organization is the owner of the image + Organization string `json:"organization,omitempty"` + + // Arch is the architecture target of the image + Arch string `json:"arch,omitempty"` + + // FIXME: extra_volumes +} + +// ScalewayImageIdentifier represents a Scaleway Image Identifier +type ScalewayImageIdentifier struct { + Identifier string + Arch string + Region string + Owner string +} + +// ScalewayOneImage represents the response of a GET /images/UUID API call +type ScalewayOneImage struct { + Image ScalewayImage `json:"image,omitempty"` +} + +// ScalewayImages represents a group of Scaleway images +type ScalewayImages struct { + // Images holds scaleway images of the response + Images []ScalewayImage `json:"images,omitempty"` +} + +// ScalewaySnapshot represents a Scaleway Snapshot +type ScalewaySnapshot struct { + // Identifier is a unique identifier for the snapshot + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the 
snapshot + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the snapshot + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the snapshot + ModificationDate string `json:"modification_date,omitempty"` + + // Size is the allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // Organization is the owner of the snapshot + Organization string `json:"organization"` + + // State is the current state of the snapshot + State string `json:"state"` + + // VolumeType is the kind of volume behind the snapshot + VolumeType string `json:"volume_type"` + + // BaseVolume is the volume from which the snapshot inherits + BaseVolume ScalewayVolume `json:"base_volume,omitempty"` +} + +// ScalewayOneSnapshot represents the response of a GET /snapshots/UUID API call +type ScalewayOneSnapshot struct { + Snapshot ScalewaySnapshot `json:"snapshot,omitempty"` +} + +// ScalewaySnapshots represents a group of Scaleway snapshots +type ScalewaySnapshots struct { + // Snapshots holds scaleway snapshots of the response + Snapshots []ScalewaySnapshot `json:"snapshots,omitempty"` +} + +// ScalewayBootscript represents a Scaleway Bootscript +type ScalewayBootscript struct { + Bootcmdargs string `json:"bootcmdargs,omitempty"` + Dtb string `json:"dtb,omitempty"` + Initrd string `json:"initrd,omitempty"` + Kernel string `json:"kernel,omitempty"` + + // Arch is the architecture target of the bootscript + Arch string `json:"architecture,omitempty"` + + // Identifier is a unique identifier for the bootscript + Identifier string `json:"id,omitempty"` + + // Organization is the owner of the bootscript + Organization string `json:"organization,omitempty"` + + // Name is a user-defined name for the bootscript + Title string `json:"title,omitempty"` + + // Public is true for public bootscripts and false for user bootscripts + Public bool `json:"public,omitempty"` + + Default bool 
`json:"default,omitempty"` +} + +// ScalewayOneBootscript represents the response of a GET /bootscripts/UUID API call +type ScalewayOneBootscript struct { + Bootscript ScalewayBootscript `json:"bootscript,omitempty"` +} + +// ScalewayBootscripts represents a group of Scaleway bootscripts +type ScalewayBootscripts struct { + // Bootscripts holds Scaleway bootscripts of the response + Bootscripts []ScalewayBootscript `json:"bootscripts,omitempty"` +} + +// ScalewayTask represents a Scaleway Task +type ScalewayTask struct { + // Identifier is a unique identifier for the task + Identifier string `json:"id,omitempty"` + + // StartDate is the start date of the task + StartDate string `json:"started_at,omitempty"` + + // TerminationDate is the termination date of the task + TerminationDate string `json:"terminated_at,omitempty"` + + HrefFrom string `json:"href_from,omitempty"` + + Description string `json:"description,omitempty"` + + Status string `json:"status,omitempty"` + + Progress int `json:"progress,omitempty"` +} + +// ScalewayOneTask represents the response of a GET /tasks/UUID API call +type ScalewayOneTask struct { + Task ScalewayTask `json:"task,omitempty"` +} + +// ScalewayTasks represents a group of Scaleway tasks +type ScalewayTasks struct { + // Tasks holds scaleway tasks of the response + Tasks []ScalewayTask `json:"tasks,omitempty"` +} + +// ScalewaySecurityGroupRule definition +type ScalewaySecurityGroupRule struct { + Direction string `json:"direction"` + Protocol string `json:"protocol"` + IPRange string `json:"ip_range"` + DestPortFrom int `json:"dest_port_from,omitempty"` + Action string `json:"action"` + Position int `json:"position"` + DestPortTo string `json:"dest_port_to"` + Editable bool `json:"editable"` + ID string `json:"id"` +} + +// ScalewayGetSecurityGroupRules represents the response of a GET /security_group/{groupID}/rules +type ScalewayGetSecurityGroupRules struct { + Rules []ScalewaySecurityGroupRule `json:"rules"` +} + +// 
ScalewayGetSecurityGroupRule represents the response of a GET /security_group/{groupID}/rules/{ruleID} +type ScalewayGetSecurityGroupRule struct { + Rules ScalewaySecurityGroupRule `json:"rule"` +} + +// ScalewayNewSecurityGroupRule definition POST/PUT request /security_group/{groupID} +type ScalewayNewSecurityGroupRule struct { + Action string `json:"action"` + Direction string `json:"direction"` + IPRange string `json:"ip_range"` + Protocol string `json:"protocol"` + DestPortFrom int `json:"dest_port_from,omitempty"` +} + +// ScalewaySecurityGroups definition +type ScalewaySecurityGroups struct { + Description string `json:"description"` + ID string `json:"id"` + Organization string `json:"organization"` + Name string `json:"name"` + Servers []ScalewaySecurityGroup `json:"servers"` + EnableDefaultSecurity bool `json:"enable_default_security"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayGetSecurityGroups represents the response of a GET /security_groups/ +type ScalewayGetSecurityGroups struct { + SecurityGroups []ScalewaySecurityGroups `json:"security_groups"` +} + +// ScalewayGetSecurityGroup represents the response of a GET /security_groups/{groupID} +type ScalewayGetSecurityGroup struct { + SecurityGroups ScalewaySecurityGroups `json:"security_group"` +} + +// ScalewayIPDefinition represents the IP's fields +type ScalewayIPDefinition struct { + Organization string `json:"organization"` + Reverse *string `json:"reverse"` + ID string `json:"id"` + Server *struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server"` + Address string `json:"address"` +} + +// ScalewayGetIPS represents the response of a GET /ips/ +type ScalewayGetIPS struct { + IPS []ScalewayIPDefinition `json:"ips"` +} + +// ScalewayGetIP represents the response of a GET /ips/{id_ip} +type ScalewayGetIP struct { + IP ScalewayIPDefinition `json:"ip"` +} + +// ScalewaySecurityGroup represents a Scaleway security group +type 
ScalewaySecurityGroup struct { + // Identifier is a unique identifier for the security group + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the security group + Name string `json:"name,omitempty"` +} + +// ScalewayNewSecurityGroup definition POST request /security_groups +type ScalewayNewSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` +} + +// ScalewayUpdateSecurityGroup definition PUT request /security_groups +type ScalewayUpdateSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayServer represents a Scaleway server +type ScalewayServer struct { + // Arch is the architecture target of the server + Arch string `json:"arch,omitempty"` + + // Identifier is a unique identifier for the server + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the server + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the server + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the server + ModificationDate string `json:"modification_date,omitempty"` + + // Image is the image used by the server + Image ScalewayImage `json:"image,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // PublicIP is the public IP address bound to the server + PublicAddress ScalewayIPAddress `json:"public_ip,omitempty"` + + // State is the current status of the server + State string `json:"state,omitempty"` + + // StateDetail is the detailed status of the server + StateDetail string `json:"state_detail,omitempty"` + + // PrivateIP represents the private IPV4 attached to the server 
(changes on each boot) + PrivateIP string `json:"private_ip,omitempty"` + + // Bootscript is the unique identifier of the selected bootscript + Bootscript *ScalewayBootscript `json:"bootscript,omitempty"` + + // Hostname represents the ServerName in a format compatible with unix's hostname + Hostname string `json:"hostname,omitempty"` + + // Tags represents user-defined tags + Tags []string `json:"tags,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]ScalewayVolume `json:"volumes,omitempty"` + + // SecurityGroup is the selected security group object + SecurityGroup ScalewaySecurityGroup `json:"security_group,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization,omitempty"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type,omitempty"` + + // Location of the server + Location struct { + Platform string `json:"platform_id,omitempty"` + Chassis string `json:"chassis_id,omitempty"` + Cluster string `json:"cluster_id,omitempty"` + Hypervisor string `json:"hypervisor_id,omitempty"` + Blade string `json:"blade_id,omitempty"` + Node string `json:"node_id,omitempty"` + ZoneID string `json:"zone_id,omitempty"` + } `json:"location,omitempty"` + + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` + + // This fields are not returned by the API, we generate it + DNSPublic string `json:"dns_public,omitempty"` + DNSPrivate string `json:"dns_private,omitempty"` +} + +// ScalewayIPV6Definition represents a Scaleway ipv6 +type ScalewayIPV6Definition struct { + Netmask string `json:"netmask"` + Gateway string `json:"gateway"` + Address string `json:"address"` +} + +// ScalewayServerPatchDefinition represents a Scaleway server with nullable fields (for PATCH) +type ScalewayServerPatchDefinition struct { + Arch *string `json:"arch,omitempty"` + Name *string `json:"name,omitempty"` + 
CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Image *ScalewayImage `json:"image,omitempty"` + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + PublicAddress *ScalewayIPAddress `json:"public_ip,omitempty"` + State *string `json:"state,omitempty"` + StateDetail *string `json:"state_detail,omitempty"` + PrivateIP *string `json:"private_ip,omitempty"` + Bootscript *string `json:"bootscript,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Volumes *map[string]ScalewayVolume `json:"volumes,omitempty"` + SecurityGroup *ScalewaySecurityGroup `json:"security_group,omitempty"` + Organization *string `json:"organization,omitempty"` + Tags *[]string `json:"tags,omitempty"` + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + EnableIPV6 *bool `json:"enable_ipv6,omitempty"` +} + +// ScalewayServerDefinition represents a Scaleway server with image definition +type ScalewayServerDefinition struct { + // Name is the user-defined name of the server + Name string `json:"name"` + + // Image is the image used by the server + Image *string `json:"image,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]string `json:"volumes,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // Bootscript is the bootscript used by the server + Bootscript *string `json:"bootscript"` + + // Tags are the metadata tags attached to the server + Tags []string `json:"tags,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type"` + + PublicIP string `json:"public_ip,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` + + SecurityGroup string `json:"security_group,omitempty"` +} + 
+// ScalewayOneServer represents the response of a GET /servers/UUID API call +type ScalewayOneServer struct { + Server ScalewayServer `json:"server,omitempty"` +} + +// ScalewayServers represents a group of Scaleway servers +type ScalewayServers struct { + // Servers holds scaleway servers of the response + Servers []ScalewayServer `json:"servers,omitempty"` +} + +// ScalewayServerAction represents an action to perform on a Scaleway server +type ScalewayServerAction struct { + // Action is the name of the action to trigger + Action string `json:"action,omitempty"` +} + +// ScalewaySnapshotDefinition represents a Scaleway snapshot definition +type ScalewaySnapshotDefinition struct { + VolumeIDentifier string `json:"volume_id"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` +} + +// ScalewayImageDefinition represents a Scaleway image definition +type ScalewayImageDefinition struct { + SnapshotIDentifier string `json:"root_volume"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` + Arch string `json:"arch"` + DefaultBootscript *string `json:"default_bootscript,omitempty"` +} + +// ScalewayRoleDefinition represents a Scaleway Token UserId Role +type ScalewayRoleDefinition struct { + Organization ScalewayOrganizationDefinition `json:"organization,omitempty"` + Role string `json:"role,omitempty"` +} + +// ScalewayTokenDefinition represents a Scaleway Token +type ScalewayTokenDefinition struct { + UserID string `json:"user_id"` + Description string `json:"description,omitempty"` + Roles ScalewayRoleDefinition `json:"roles"` + Expires string `json:"expires"` + InheritsUsersPerms bool `json:"inherits_user_perms"` + ID string `json:"id"` +} + +// ScalewayTokensDefinition represents a Scaleway Tokens +type ScalewayTokensDefinition struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayGetTokens represents a list of Scaleway Tokens +type ScalewayGetTokens struct { + Tokens 
[]ScalewayTokenDefinition `json:"tokens"` +} + +// ScalewayContainerData represents a Scaleway container data (S3) +type ScalewayContainerData struct { + LastModified string `json:"last_modified"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainerDatas represents a list of Scaleway containers data (S3) +type ScalewayGetContainerDatas struct { + Container []ScalewayContainerData `json:"container"` +} + +// ScalewayContainer represents a Scaleway container (S3) +type ScalewayContainer struct { + ScalewayOrganizationDefinition `json:"organization"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainers represents a list of Scaleway containers (S3) +type ScalewayGetContainers struct { + Containers []ScalewayContainer `json:"containers"` +} + +// ScalewayConnectResponse represents the answer from POST /tokens +type ScalewayConnectResponse struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayConnect represents the data to connect +type ScalewayConnect struct { + Email string `json:"email"` + Password string `json:"password"` + Description string `json:"description"` + Expires bool `json:"expires"` +} + +// ScalewayOrganizationDefinition represents a Scaleway Organization +type ScalewayOrganizationDefinition struct { + ID string `json:"id"` + Name string `json:"name"` + Users []ScalewayUserDefinition `json:"users"` +} + +// ScalewayOrganizationsDefinition represents a Scaleway Organizations +type ScalewayOrganizationsDefinition struct { + Organizations []ScalewayOrganizationDefinition `json:"organizations"` +} + +// ScalewayUserDefinition represents a Scaleway User +type ScalewayUserDefinition struct { + Email string `json:"email"` + Firstname string `json:"firstname"` + Fullname string `json:"fullname"` + ID string `json:"id"` + Lastname string `json:"lastname"` + Organizations []ScalewayOrganizationDefinition `json:"organizations"` + Roles []ScalewayRoleDefinition `json:"roles"` + 
SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayUsersDefinition represents the response of a GET /user +type ScalewayUsersDefinition struct { + User ScalewayUserDefinition `json:"user"` +} + +// ScalewayKeyDefinition represents a key +type ScalewayKeyDefinition struct { + Key string `json:"key"` + Fingerprint string `json:"fingerprint,omitempty"` +} + +// ScalewayUserPatchSSHKeyDefinition represents a User Patch +type ScalewayUserPatchSSHKeyDefinition struct { + SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayDashboardResp represents a dashboard received from the API +type ScalewayDashboardResp struct { + Dashboard ScalewayDashboard +} + +// ScalewayDashboard represents a dashboard +type ScalewayDashboard struct { + VolumesCount int `json:"volumes_count"` + RunningServersCount int `json:"running_servers_count"` + ImagesCount int `json:"images_count"` + SnapshotsCount int `json:"snapshots_count"` + ServersCount int `json:"servers_count"` + IPsCount int `json:"ips_count"` +} + +// ScalewayPermissions represents the response of GET /permissions +type ScalewayPermissions map[string]ScalewayPermCategory + +// ScalewayPermCategory represents ScalewayPermissions's fields +type ScalewayPermCategory map[string][]string + +// ScalewayPermissionDefinition represents the permissions +type ScalewayPermissionDefinition struct { + Permissions ScalewayPermissions `json:"permissions"` +} + +// ScalewayUserdatas represents the response of a GET /user_data +type ScalewayUserdatas struct { + UserData []string `json:"user_data"` +} + +// ScalewayQuota represents a map of quota (name, value) +type ScalewayQuota map[string]int + +// ScalewayGetQuotas represents the response of GET /organizations/{orga_id}/quotas +type ScalewayGetQuotas struct { + Quotas ScalewayQuota `json:"quotas"` +} + +// ScalewayUserdata represents []byte +type ScalewayUserdata []byte + +// FuncMap used for json inspection +var FuncMap = 
template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// MarketLocalImageDefinition represents localImage of marketplace version +type MarketLocalImageDefinition struct { + Arch string `json:"arch"` + ID string `json:"id"` + Zone string `json:"zone"` +} + +// MarketLocalImages represents an array of local images +type MarketLocalImages struct { + LocalImages []MarketLocalImageDefinition `json:"local_images"` +} + +// MarketLocalImage represents local image +type MarketLocalImage struct { + LocalImages MarketLocalImageDefinition `json:"local_image"` +} + +// MarketVersionDefinition represents version of marketplace image +type MarketVersionDefinition struct { + CreationDate string `json:"creation_date"` + ID string `json:"id"` + Image struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"image"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + MarketLocalImages +} + +// MarketVersions represents an array of marketplace image versions +type MarketVersions struct { + Versions []MarketVersionDefinition `json:"versions"` +} + +// MarketVersion represents version of marketplace image +type MarketVersion struct { + Version MarketVersionDefinition `json:"version"` +} + +// MarketImage represents MarketPlace image +type MarketImage struct { + Categories []string `json:"categories"` + CreationDate string `json:"creation_date"` + CurrentPublicVersion string `json:"current_public_version"` + Description string `json:"description"` + ID string `json:"id"` + Logo string `json:"logo"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + Organization struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"organization"` + Public bool `json:"-"` + MarketVersions +} + +// MarketImages represents MarketPlace images +type MarketImages struct { + Images []MarketImage `json:"images"` +} + +// NewScalewayAPI creates a ready-to-use 
ScalewayAPI client +func NewScalewayAPI(organization, token, userAgent, region string, options ...func(*ScalewayAPI)) (*ScalewayAPI, error) { + s := &ScalewayAPI{ + // exposed + Organization: organization, + Token: token, + Logger: NewDefaultLogger(), + + // internal + client: &http.Client{}, + verbose: os.Getenv("SCW_VERBOSE_API") != "", + password: "", + userAgent: userAgent, + } + for _, option := range options { + option(s) + } + cache, err := NewScalewayCache(func() { s.Logger.Debugf("Writing cache file to disk") }) + if err != nil { + return nil, err + } + s.Cache = cache + if os.Getenv("SCW_TLSVERIFY") == "0" { + s.client.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + } + switch region { + case "par1", "": + s.computeAPI = ComputeAPIPar1 + case "ams1": + s.computeAPI = ComputeAPIAms1 + default: + return nil, fmt.Errorf("%s isn't a valid region", region) + } + s.Region = region + if url := os.Getenv("SCW_COMPUTE_API"); url != "" { + s.computeAPI = url + } + return s, nil +} + +// ClearCache clears the cache +func (s *ScalewayAPI) ClearCache() { + s.Cache.Clear() +} + +// Sync flushes out the cache to the disk +func (s *ScalewayAPI) Sync() { + s.Cache.Save() +} + +func (s *ScalewayAPI) response(method, uri string, content io.Reader) (resp *http.Response, err error) { + var ( + req *http.Request + ) + + req, err = http.NewRequest(method, uri, content) + if err != nil { + err = fmt.Errorf("response %s %s", method, uri) + return + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + s.LogHTTP(req) + if s.verbose { + dump, _ := httputil.DumpRequest(req, true) + s.Debugf("%v", string(dump)) + } else { + s.Debugf("[%s]: %v", method, uri) + } + resp, err = s.client.Do(req) + return +} + +// GetResponsePaginate fetchs all resources and returns an http.Response object for the requested resource +func (s *ScalewayAPI) 
GetResponsePaginate(apiURL, resource string, values url.Values) (*http.Response, error) { + resp, err := s.response("HEAD", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + if err != nil { + return nil, err + } + + count := resp.Header.Get("X-Total-Count") + var maxElem int + if count == "" { + maxElem = 0 + } else { + maxElem, err = strconv.Atoi(count) + if err != nil { + return nil, err + } + } + + get := maxElem / perPage + if (float32(maxElem) / perPage) > float32(get) { + get++ + } + + if get <= 1 { // If there is 0 or 1 page of result, the response is not paginated + if len(values) == 0 { + return s.response("GET", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil) + } + return s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + } + + fetchAll := !(values.Get("per_page") != "" || values.Get("page") != "") + if fetchAll { + var g errgroup.Group + + ch := make(chan *http.Response, get) + for i := 1; i <= get; i++ { + i := i // closure tricks + g.Go(func() (err error) { + var resp *http.Response + + val := url.Values{} + val.Set("per_page", fmt.Sprintf("%v", perPage)) + val.Set("page", fmt.Sprintf("%v", i)) + resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, val.Encode()), nil) + ch <- resp + return + }) + } + if err = g.Wait(); err != nil { + return nil, err + } + newBody := make(map[string][]json.RawMessage) + body := make(map[string][]json.RawMessage) + key := "" + for i := 0; i < get; i++ { + res := <-ch + if res.StatusCode != http.StatusOK { + return res, nil + } + content, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return nil, err + } + if err := json.Unmarshal(content, &body); err != nil { + return nil, err + } + + if i == 0 { + resp = res + for k := range body { + key = k + break + } + } + newBody[key] = append(newBody[key], body[key]...) 
+ } + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(newBody); err != nil { + return nil, err + } + resp.Body = ioutil.NopCloser(payload) + } else { + resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) + } + return resp, err +} + +// PostResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("POST", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// PatchResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("PATCH", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// PutResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PutResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + payload := new(bytes.Buffer) + if err := json.NewEncoder(payload).Encode(data); err != nil { + return nil, err + } + return s.response("PUT", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload) +} + +// DeleteResponse returns an http.Response object for the deleted resource +func (s *ScalewayAPI) DeleteResponse(apiURL, resource string) (*http.Response, error) { + return s.response("DELETE", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil) +} + +// handleHTTPError checks the statusCode and displays the error +func (s *ScalewayAPI) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) { + body, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if s.verbose { + resp.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + dump, err := httputil.DumpResponse(resp, true) + if err == nil { + var js bytes.Buffer + + err = json.Indent(&js, body, "", " ") + if err != nil { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, string(dump)) + } else { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, js.String()) + } + } + } else { + s.Debugf("[Response]: [%v]\n%v", resp.StatusCode, string(body)) + } + + if resp.StatusCode >= http.StatusInternalServerError { + return nil, errors.New(string(body)) + } + good := false + for _, code := range goodStatusCode { + if code == resp.StatusCode { + good = true + } + } + if !good { + var scwError ScalewayAPIError + + if err := json.Unmarshal(body, &scwError); err != nil { + return nil, err + } + scwError.StatusCode = resp.StatusCode + s.Debugf("%s", scwError.Error()) + return nil, scwError + } + return body, nil +} + +func (s *ScalewayAPI) fetchServers(api string, query url.Values, out chan<- ScalewayServers) func() error { + return func() error { + resp, err := s.GetResponsePaginate(api, "servers", query) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return err + } + var servers ScalewayServers + + if err = json.Unmarshal(body, &servers); err != nil { + return err + } + out <- servers + return nil + } +} + +// GetServers gets the list of servers from the ScalewayAPI +func (s *ScalewayAPI) GetServers(all bool, limit int) (*[]ScalewayServer, error) { + query := url.Values{} + if !all { + query.Set("state", "running") + } + if limit > 0 { + // FIXME: wait for the API to be ready + // query.Set("per_page", strconv.Itoa(limit)) + panic("Not implemented yet") + } + if all && limit == 0 { + s.Cache.ClearServers() + } + var ( + g errgroup.Group + apis = []string{ + ComputeAPIPar1, + ComputeAPIAms1, + } + ) + + serverChan := 
make(chan ScalewayServers, 2) + for _, api := range apis { + g.Go(s.fetchServers(api, query, serverChan)) + } + + if err := g.Wait(); err != nil { + return nil, err + } + close(serverChan) + var servers ScalewayServers + + for server := range serverChan { + servers.Servers = append(servers.Servers, server.Servers...) + } + + for i, server := range servers.Servers { + servers.Servers[i].DNSPublic = server.Identifier + URLPublicDNS + servers.Servers[i].DNSPrivate = server.Identifier + URLPrivateDNS + s.Cache.InsertServer(server.Identifier, server.Location.ZoneID, server.Arch, server.Organization, server.Name) + } + return &servers.Servers, nil +} + +// ScalewaySortServers represents a wrapper to sort by CreationDate the servers +type ScalewaySortServers []ScalewayServer + +func (s ScalewaySortServers) Len() int { + return len(s) +} + +func (s ScalewaySortServers) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ScalewaySortServers) Less(i, j int) bool { + date1, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[i].CreationDate) + date2, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[j].CreationDate) + return date2.Before(date1) +} + +// GetServer gets a server from the ScalewayAPI +func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "servers/"+serverID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + + var oneServer ScalewayOneServer + + if err = json.Unmarshal(body, &oneServer); err != nil { + return nil, err + } + // FIXME arch, owner, title + oneServer.Server.DNSPublic = oneServer.Server.Identifier + URLPublicDNS + oneServer.Server.DNSPrivate = oneServer.Server.Identifier + URLPrivateDNS + s.Cache.InsertServer(oneServer.Server.Identifier, oneServer.Server.Location.ZoneID, oneServer.Server.Arch, oneServer.Server.Organization, 
oneServer.Server.Name) + return &oneServer.Server, nil +} + +// PostServerAction posts an action on a server +func (s *ScalewayAPI) PostServerAction(serverID, action string) error { + data := ScalewayServerAction{ + Action: action, + } + resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("servers/%s/action", serverID), data) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// DeleteServer deletes a server +func (s *ScalewayAPI) DeleteServer(serverID string) error { + defer s.Cache.RemoveServer(serverID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// PostServer creates a new server +func (s *ScalewayAPI) PostServer(definition ScalewayServerDefinition) (string, error) { + definition.Organization = s.Organization + + resp, err := s.PostResponse(s.computeAPI, "servers", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var server ScalewayOneServer + + if err = json.Unmarshal(body, &server); err != nil { + return "", err + } + // FIXME arch, owner, title + s.Cache.InsertServer(server.Server.Identifier, server.Server.Location.ZoneID, server.Server.Arch, server.Server.Organization, server.Server.Name) + return server.Server.Identifier, nil +} + +// PatchUserSSHKey updates a user +func (s *ScalewayAPI) PatchUserSSHKey(UserID string, definition ScalewayUserPatchSSHKeyDefinition) error { + resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition) + if err != nil { + return err + } + + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil { + return err + } + 
return nil +} + +// PatchServer updates a server +func (s *ScalewayAPI) PatchServer(serverID string, definition ScalewayServerPatchDefinition) error { + resp, err := s.PatchResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID), definition) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil { + return err + } + return nil +} + +// PostSnapshot creates a new snapshot +func (s *ScalewayAPI) PostSnapshot(volumeID string, name string) (string, error) { + definition := ScalewaySnapshotDefinition{ + VolumeIDentifier: volumeID, + Name: name, + Organization: s.Organization, + } + resp, err := s.PostResponse(s.computeAPI, "snapshots", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var snapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &snapshot); err != nil { + return "", err + } + // FIXME arch, owner, title + s.Cache.InsertSnapshot(snapshot.Snapshot.Identifier, "", "", snapshot.Snapshot.Organization, snapshot.Snapshot.Name) + return snapshot.Snapshot.Identifier, nil +} + +// PostImage creates a new image +func (s *ScalewayAPI) PostImage(volumeID string, name string, bootscript string, arch string) (string, error) { + definition := ScalewayImageDefinition{ + SnapshotIDentifier: volumeID, + Name: name, + Organization: s.Organization, + Arch: arch, + } + if bootscript != "" { + definition.DefaultBootscript = &bootscript + } + + resp, err := s.PostResponse(s.computeAPI, "images", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var image ScalewayOneImage + + if err = json.Unmarshal(body, &image); err != nil { + return "", err + } + // FIXME region, arch, owner, title + 
s.Cache.InsertImage(image.Image.Identifier, "", image.Image.Arch, image.Image.Organization, image.Image.Name, "") + return image.Image.Identifier, nil +} + +// PostVolume creates a new volume +func (s *ScalewayAPI) PostVolume(definition ScalewayVolumeDefinition) (string, error) { + definition.Organization = s.Organization + if definition.Type == "" { + definition.Type = "l_ssd" + } + + resp, err := s.PostResponse(s.computeAPI, "volumes", definition) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return "", err + } + var volume ScalewayOneVolume + + if err = json.Unmarshal(body, &volume); err != nil { + return "", err + } + // FIXME: s.Cache.InsertVolume(volume.Volume.Identifier, volume.Volume.Name) + return volume.Volume.Identifier, nil +} + +// PutVolume updates a volume +func (s *ScalewayAPI) PutVolume(volumeID string, definition ScalewayVolumePutDefinition) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID), definition) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// ResolveServer attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveServer(needle string) (ScalewayResolverResults, error) { + servers, err := s.Cache.LookUpServers(needle, true) + if err != nil { + return servers, err + } + if len(servers) == 0 { + if _, err = s.GetServers(true, 0); err != nil { + return nil, err + } + servers, err = s.Cache.LookUpServers(needle, true) + } + return servers, err +} + +// ResolveVolume attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveVolume(needle string) (ScalewayResolverResults, error) { + volumes, err := s.Cache.LookUpVolumes(needle, true) + if err != nil { + return volumes, err + } + if len(volumes) == 0 { + if _, err = s.GetVolumes(); err != nil { + 
return nil, err + } + volumes, err = s.Cache.LookUpVolumes(needle, true) + } + return volumes, err +} + +// ResolveSnapshot attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveSnapshot(needle string) (ScalewayResolverResults, error) { + snapshots, err := s.Cache.LookUpSnapshots(needle, true) + if err != nil { + return snapshots, err + } + if len(snapshots) == 0 { + if _, err = s.GetSnapshots(); err != nil { + return nil, err + } + snapshots, err = s.Cache.LookUpSnapshots(needle, true) + } + return snapshots, err +} + +// ResolveImage attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveImage(needle string) (ScalewayResolverResults, error) { + images, err := s.Cache.LookUpImages(needle, true) + if err != nil { + return images, err + } + if len(images) == 0 { + if _, err = s.GetImages(); err != nil { + return nil, err + } + images, err = s.Cache.LookUpImages(needle, true) + } + return images, err +} + +// ResolveBootscript attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveBootscript(needle string) (ScalewayResolverResults, error) { + bootscripts, err := s.Cache.LookUpBootscripts(needle, true) + if err != nil { + return bootscripts, err + } + if len(bootscripts) == 0 { + if _, err = s.GetBootscripts(); err != nil { + return nil, err + } + bootscripts, err = s.Cache.LookUpBootscripts(needle, true) + } + return bootscripts, err +} + +// GetImages gets the list of images from the ScalewayAPI +func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { + images, err := s.GetMarketPlaceImages("") + if err != nil { + return nil, err + } + s.Cache.ClearImages() + for i, image := range images.Images { + if image.CurrentPublicVersion != "" { + for _, version := range image.Versions { + if version.ID == image.CurrentPublicVersion { + for _, localImage := range version.LocalImages { + images.Images[i].Public = true + s.Cache.InsertImage(localImage.ID, 
localImage.Zone, localImage.Arch, image.Organization.ID, image.Name, image.CurrentPublicVersion) + } + } + } + } + } + values := url.Values{} + values.Set("organization", s.Organization) + resp, err := s.GetResponsePaginate(s.computeAPI, "images", values) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var OrgaImages ScalewayImages + + if err = json.Unmarshal(body, &OrgaImages); err != nil { + return nil, err + } + + for _, orgaImage := range OrgaImages.Images { + images.Images = append(images.Images, MarketImage{ + Categories: []string{"MyImages"}, + CreationDate: orgaImage.CreationDate, + CurrentPublicVersion: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + Name: orgaImage.Name, + Public: false, + MarketVersions: MarketVersions{ + Versions: []MarketVersionDefinition{ + { + CreationDate: orgaImage.CreationDate, + ID: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + MarketLocalImages: MarketLocalImages{ + LocalImages: []MarketLocalImageDefinition{ + { + Arch: orgaImage.Arch, + ID: orgaImage.Identifier, + // TODO: fecth images from ams1 and par1 + Zone: s.Region, + }, + }, + }, + }, + }, + }, + }) + s.Cache.InsertImage(orgaImage.Identifier, s.Region, orgaImage.Arch, orgaImage.Organization, orgaImage.Name, "") + } + return &images.Images, nil +} + +// GetImage gets an image from the ScalewayAPI +func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "images/"+imageID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneImage ScalewayOneImage + + if err = json.Unmarshal(body, &oneImage); err != nil { + return nil, err + } + // FIXME owner, title + s.Cache.InsertImage(oneImage.Image.Identifier, 
s.Region, oneImage.Image.Arch, oneImage.Image.Organization, oneImage.Image.Name, "") + return &oneImage.Image, nil +} + +// DeleteImage deletes a image +func (s *ScalewayAPI) DeleteImage(imageID string) error { + defer s.Cache.RemoveImage(imageID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("images/%s", imageID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// DeleteSnapshot deletes a snapshot +func (s *ScalewayAPI) DeleteSnapshot(snapshotID string) error { + defer s.Cache.RemoveSnapshot(snapshotID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("snapshots/%s", snapshotID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// DeleteVolume deletes a volume +func (s *ScalewayAPI) DeleteVolume(volumeID string) error { + defer s.Cache.RemoveVolume(volumeID) + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID)) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} + +// GetSnapshots gets the list of snapshots from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshots() (*[]ScalewaySnapshot, error) { + query := url.Values{} + s.Cache.ClearSnapshots() + + resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var snapshots ScalewaySnapshots + + if err = json.Unmarshal(body, &snapshots); err != nil { + return nil, err + } + for _, snapshot := range snapshots.Snapshots { + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(snapshot.Identifier, "", 
"", snapshot.Organization, snapshot.Name) + } + return &snapshots.Snapshots, nil +} + +// GetSnapshot gets a snapshot from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshot(snapshotID string) (*ScalewaySnapshot, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots/"+snapshotID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneSnapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &oneSnapshot); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(oneSnapshot.Snapshot.Identifier, "", "", oneSnapshot.Snapshot.Organization, oneSnapshot.Snapshot.Name) + return &oneSnapshot.Snapshot, nil +} + +// GetVolumes gets the list of volumes from the ScalewayAPI +func (s *ScalewayAPI) GetVolumes() (*[]ScalewayVolume, error) { + query := url.Values{} + s.Cache.ClearVolumes() + + resp, err := s.GetResponsePaginate(s.computeAPI, "volumes", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + + var volumes ScalewayVolumes + + if err = json.Unmarshal(body, &volumes); err != nil { + return nil, err + } + for _, volume := range volumes.Volumes { + // FIXME region, arch, owner, title + s.Cache.InsertVolume(volume.Identifier, "", "", volume.Organization, volume.Name) + } + return &volumes.Volumes, nil +} + +// GetVolume gets a volume from the ScalewayAPI +func (s *ScalewayAPI) GetVolume(volumeID string) (*ScalewayVolume, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "volumes/"+volumeID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneVolume ScalewayOneVolume + + if err = json.Unmarshal(body, 
&oneVolume); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertVolume(oneVolume.Volume.Identifier, "", "", oneVolume.Volume.Organization, oneVolume.Volume.Name) + return &oneVolume.Volume, nil +} + +// GetBootscripts gets the list of bootscripts from the ScalewayAPI +func (s *ScalewayAPI) GetBootscripts() (*[]ScalewayBootscript, error) { + query := url.Values{} + + s.Cache.ClearBootscripts() + resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var bootscripts ScalewayBootscripts + + if err = json.Unmarshal(body, &bootscripts); err != nil { + return nil, err + } + for _, bootscript := range bootscripts.Bootscripts { + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(bootscript.Identifier, "", bootscript.Arch, bootscript.Organization, bootscript.Title) + } + return &bootscripts.Bootscripts, nil +} + +// GetBootscript gets a bootscript from the ScalewayAPI +func (s *ScalewayAPI) GetBootscript(bootscriptID string) (*ScalewayBootscript, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts/"+bootscriptID, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var oneBootscript ScalewayOneBootscript + + if err = json.Unmarshal(body, &oneBootscript); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(oneBootscript.Bootscript.Identifier, "", oneBootscript.Bootscript.Arch, oneBootscript.Bootscript.Organization, oneBootscript.Bootscript.Title) + return &oneBootscript.Bootscript, nil +} + +// GetUserdatas gets list of userdata for a server +func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUserdatas, error) { + var 
uri, endpoint string + + endpoint = s.computeAPI + if metadata { + uri = "/user_data" + endpoint = MetadataAPI + } else { + uri = fmt.Sprintf("servers/%s/user_data", serverID) + } + + resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var userdatas ScalewayUserdatas + + if err = json.Unmarshal(body, &userdatas); err != nil { + return nil, err + } + return &userdatas, nil +} + +func (s *ScalewayUserdata) String() string { + return string(*s) +} + +// GetUserdata gets a specific userdata for a server +func (s *ScalewayAPI) GetUserdata(serverID, key string, metadata bool) (*ScalewayUserdata, error) { + var uri, endpoint string + + endpoint = s.computeAPI + if metadata { + uri = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + uri = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + var err error + resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("no such user_data %q (%d)", key, resp.StatusCode) + } + var data ScalewayUserdata + data, err = ioutil.ReadAll(resp.Body) + return &data, err +} + +// PatchUserdata sets a user data +func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata bool) error { + var resource, endpoint string + + endpoint = s.computeAPI + if metadata { + resource = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + resource = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + uri := fmt.Sprintf("%s/%s", strings.TrimRight(endpoint, "/"), resource) + payload := new(bytes.Buffer) + payload.Write(value) + + req, err := http.NewRequest("PATCH", uri, payload) + if err != nil { + return err + } + + req.Header.Set("X-Auth-Token", s.Token) + 
req.Header.Set("Content-Type", "text/plain") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + resp, err := s.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNoContent { + return nil + } + + return fmt.Errorf("cannot set user_data (%d)", resp.StatusCode) +} + +// DeleteUserdata deletes a server user_data +func (s *ScalewayAPI) DeleteUserdata(serverID, key string, metadata bool) error { + var url, endpoint string + + endpoint = s.computeAPI + if metadata { + url = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + url = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + resp, err := s.DeleteResponse(endpoint, url) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetTasks get the list of tasks from the ScalewayAPI +func (s *ScalewayAPI) GetTasks() (*[]ScalewayTask, error) { + query := url.Values{} + resp, err := s.GetResponsePaginate(s.computeAPI, "tasks", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var tasks ScalewayTasks + + if err = json.Unmarshal(body, &tasks); err != nil { + return nil, err + } + return &tasks.Tasks, nil +} + +// CheckCredentials performs a dummy check to ensure we can contact the API +func (s *ScalewayAPI) CheckCredentials() error { + query := url.Values{} + + resp, err := s.GetResponsePaginate(AccountAPI, "tokens", query) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return err + } + found := false + var tokens ScalewayGetTokens + + if err = json.Unmarshal(body, &tokens); err != nil { + return err + } + for _, token := range tokens.Tokens { + if token.ID == s.Token { + found = true + break + } + } + if 
!found { + return fmt.Errorf("Invalid token %v", s.Token) + } + return nil +} + +// GetUserID returns the userID +func (s *ScalewayAPI) GetUserID() (string, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", s.Token), url.Values{}) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return "", err + } + var token ScalewayTokensDefinition + + if err = json.Unmarshal(body, &token); err != nil { + return "", err + } + return token.Token.UserID, nil +} + +// GetOrganization returns Organization +func (s *ScalewayAPI) GetOrganization() (*ScalewayOrganizationsDefinition, error) { + resp, err := s.GetResponsePaginate(AccountAPI, "organizations", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data ScalewayOrganizationsDefinition + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data, nil +} + +// GetUser returns the user +func (s *ScalewayAPI) GetUser() (*ScalewayUserDefinition, error) { + userID, err := s.GetUserID() + if err != nil { + return nil, err + } + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("users/%s", userID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var user ScalewayUsersDefinition + + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user.User, nil +} + +// GetPermissions returns the permissions +func (s *ScalewayAPI) GetPermissions() (*ScalewayPermissionDefinition, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := 
s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var permissions ScalewayPermissionDefinition + + if err = json.Unmarshal(body, &permissions); err != nil { + return nil, err + } + return &permissions, nil +} + +// GetDashboard returns the dashboard +func (s *ScalewayAPI) GetDashboard() (*ScalewayDashboard, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "dashboard", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var dashboard ScalewayDashboardResp + + if err = json.Unmarshal(body, &dashboard); err != nil { + return nil, err + } + return &dashboard.Dashboard, nil +} + +// GetServerID returns exactly one server matching +func (s *ScalewayAPI) GetServerID(needle string) (string, error) { + // Parses optional type prefix, i.e: "server:name" -> "name" + _, needle = parseNeedle(needle) + + servers, err := s.ResolveServer(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve server %s: %s", needle, err) + } + if len(servers) == 1 { + return servers[0].Identifier, nil + } + if len(servers) == 0 { + return "", fmt.Errorf("No such server: %s", needle) + } + return "", showResolverResults(needle, servers) +} + +func showResolverResults(needle string, results ScalewayResolverResults) error { + w := tabwriter.NewWriter(os.Stderr, 20, 1, 3, ' ', 0) + defer w.Flush() + sort.Sort(results) + fmt.Fprintf(w, " IMAGEID\tFROM\tNAME\tZONE\tARCH\n") + for _, result := range results { + if result.Arch == "" { + result.Arch = "n/a" + } + fmt.Fprintf(w, "- %s\t%s\t%s\t%s\t%s\n", result.TruncIdentifier(), result.CodeName(), result.Name, result.Region, result.Arch) + } + return fmt.Errorf("Too many candidates for %s (%d)", needle, len(results)) +} + +// GetVolumeID returns exactly one volume matching +func (s *ScalewayAPI) GetVolumeID(needle string) (string, error) { + // Parses optional type 
prefix, i.e: "volume:name" -> "name" + _, needle = parseNeedle(needle) + + volumes, err := s.ResolveVolume(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve volume %s: %s", needle, err) + } + if len(volumes) == 1 { + return volumes[0].Identifier, nil + } + if len(volumes) == 0 { + return "", fmt.Errorf("No such volume: %s", needle) + } + return "", showResolverResults(needle, volumes) +} + +// GetSnapshotID returns exactly one snapshot matching +func (s *ScalewayAPI) GetSnapshotID(needle string) (string, error) { + // Parses optional type prefix, i.e: "snapshot:name" -> "name" + _, needle = parseNeedle(needle) + + snapshots, err := s.ResolveSnapshot(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve snapshot %s: %s", needle, err) + } + if len(snapshots) == 1 { + return snapshots[0].Identifier, nil + } + if len(snapshots) == 0 { + return "", fmt.Errorf("No such snapshot: %s", needle) + } + return "", showResolverResults(needle, snapshots) +} + +// FilterImagesByArch removes entry that doesn't match with architecture +func FilterImagesByArch(res ScalewayResolverResults, arch string) (ret ScalewayResolverResults) { + if arch == "*" { + return res + } + for _, result := range res { + if result.Arch == arch { + ret = append(ret, result) + } + } + return +} + +// FilterImagesByRegion removes entry that doesn't match with region +func FilterImagesByRegion(res ScalewayResolverResults, region string) (ret ScalewayResolverResults) { + if region == "*" { + return res + } + for _, result := range res { + if result.Region == region { + ret = append(ret, result) + } + } + return +} + +// GetImageID returns exactly one image matching +func (s *ScalewayAPI) GetImageID(needle, arch string) (*ScalewayImageIdentifier, error) { + // Parses optional type prefix, i.e: "image:name" -> "name" + _, needle = parseNeedle(needle) + + images, err := s.ResolveImage(needle) + if err != nil { + return nil, fmt.Errorf("Unable to resolve image %s: %s", needle, 
err) + } + images = FilterImagesByArch(images, arch) + images = FilterImagesByRegion(images, s.Region) + if len(images) == 1 { + return &ScalewayImageIdentifier{ + Identifier: images[0].Identifier, + Arch: images[0].Arch, + // FIXME region, owner hardcoded + Region: images[0].Region, + Owner: "", + }, nil + } + if len(images) == 0 { + return nil, fmt.Errorf("No such image (zone %s, arch %s) : %s", s.Region, arch, needle) + } + return nil, showResolverResults(needle, images) +} + +// GetSecurityGroups returns a ScalewaySecurityGroups +func (s *ScalewayAPI) GetSecurityGroups() (*ScalewayGetSecurityGroups, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "security_groups", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroups + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// GetSecurityGroupRules returns a ScalewaySecurityGroupRules +func (s *ScalewayAPI) GetSecurityGroupRules(groupID string) (*ScalewayGetSecurityGroupRules, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", groupID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroupRules ScalewayGetSecurityGroupRules + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroupRule returns a ScalewaySecurityGroupRule +func (s *ScalewayAPI) GetASecurityGroupRule(groupID string, rulesID string) (*ScalewayGetSecurityGroupRule, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID), url.Values{}) + if err != nil 
{ + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroupRules ScalewayGetSecurityGroupRule + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroup returns a ScalewaySecurityGroup +func (s *ScalewayAPI) GetASecurityGroup(groupsID string) (*ScalewayGetSecurityGroup, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s", groupsID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroup + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// PostSecurityGroup posts a group on a server +func (s *ScalewayAPI) PostSecurityGroup(group ScalewayNewSecurityGroup) error { + resp, err := s.PostResponse(s.computeAPI, "security_groups", group) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusCreated}, resp) + return err +} + +// PostSecurityGroupRule posts a rule on a server +func (s *ScalewayAPI) PostSecurityGroupRule(SecurityGroupID string, rules ScalewayNewSecurityGroupRule) error { + resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", SecurityGroupID), rules) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusCreated}, resp) + return err +} + +// DeleteSecurityGroup deletes a SecurityGroup +func (s *ScalewayAPI) DeleteSecurityGroup(securityGroupID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = 
s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// PutSecurityGroup updates a SecurityGroup +func (s *ScalewayAPI) PutSecurityGroup(group ScalewayUpdateSecurityGroup, securityGroupID string) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutSecurityGroupRule updates a SecurityGroupRule +func (s *ScalewayAPI) PutSecurityGroupRule(rules ScalewayNewSecurityGroupRule, securityGroupID, RuleID string) error { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", securityGroupID, RuleID), rules) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteSecurityGroupRule deletes a SecurityGroupRule +func (s *ScalewayAPI) DeleteSecurityGroupRule(SecurityGroupID, RuleID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", SecurityGroupID, RuleID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetContainers returns a ScalewayGetContainers +func (s *ScalewayAPI) GetContainers() (*ScalewayGetContainers, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "containers", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var containers ScalewayGetContainers + + if err = json.Unmarshal(body, &containers); err != nil { + return nil, err + } + return &containers, nil +} + +// GetContainerDatas returns a ScalewayGetContainerDatas +func (s *ScalewayAPI) GetContainerDatas(container string) (*ScalewayGetContainerDatas, error) { + resp, err := 
s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("containers/%s", container), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var datas ScalewayGetContainerDatas + + if err = json.Unmarshal(body, &datas); err != nil { + return nil, err + } + return &datas, nil +} + +// GetIPS returns a ScalewayGetIPS +func (s *ScalewayAPI) GetIPS() (*ScalewayGetIPS, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "ips", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ips ScalewayGetIPS + + if err = json.Unmarshal(body, &ips); err != nil { + return nil, err + } + return &ips, nil +} + +// NewIP returns a new IP +func (s *ScalewayAPI) NewIP() (*ScalewayGetIP, error) { + var orga struct { + Organization string `json:"organization"` + } + orga.Organization = s.Organization + resp, err := s.PostResponse(s.computeAPI, "ips", orga) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// AttachIP attachs an IP to a server +func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { + var update struct { + Address string `json:"address"` + ID string `json:"id"` + Reverse *string `json:"reverse"` + Organization string `json:"organization"` + Server string `json:"server"` + } + + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + update.Address = ip.IP.Address + update.ID = ip.IP.ID + update.Organization = ip.IP.Organization + update.Server = serverID + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), update) + if err != nil { + return 
err + } + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DetachIP detaches an IP from a server +func (s *ScalewayAPI) DetachIP(ipID string) error { + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + ip.IP.Server = nil + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), ip.IP) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteIP deletes an IP +func (s *ScalewayAPI) DeleteIP(ipID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// GetIP returns a ScalewayGetIP +func (s *ScalewayAPI) GetIP(ipID string) (*ScalewayGetIP, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("ips/%s", ipID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// GetQuotas returns a ScalewayGetQuotas +func (s *ScalewayAPI) GetQuotas() (*ScalewayGetQuotas, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("organizations/%s/quotas", s.Organization), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var quotas ScalewayGetQuotas + + if err = json.Unmarshal(body, "as); err != nil { + return nil, err + } + return "as, nil +} + +// GetBootscriptID returns exactly one bootscript matching +func (s *ScalewayAPI) GetBootscriptID(needle, arch string) (string, error) { + // Parses optional type prefix, i.e: "bootscript:name" -> "name" + _, 
needle = parseNeedle(needle) + + bootscripts, err := s.ResolveBootscript(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve bootscript %s: %s", needle, err) + } + bootscripts.FilterByArch(arch) + if len(bootscripts) == 1 { + return bootscripts[0].Identifier, nil + } + if len(bootscripts) == 0 { + return "", fmt.Errorf("No such bootscript: %s", needle) + } + return "", showResolverResults(needle, bootscripts) +} + +func rootNetDial(network, addr string) (net.Conn, error) { + dialer := net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 10 * time.Second, + } + + // bruteforce privileged ports + var localAddr net.Addr + var err error + for port := 1; port <= 1024; port++ { + localAddr, err = net.ResolveTCPAddr("tcp", fmt.Sprintf(":%d", port)) + + // this should never happen + if err != nil { + return nil, err + } + + dialer.LocalAddr = localAddr + + conn, err := dialer.Dial(network, addr) + + // if err is nil, dialer.Dial succeed, so let's go + // else, err != nil, but we don't care + if err == nil { + return conn, nil + } + } + // if here, all privileged ports were tried without success + return nil, fmt.Errorf("bind: permission denied, are you root ?") +} + +// SetPassword register the password +func (s *ScalewayAPI) SetPassword(password string) { + s.password = password +} + +// GetMarketPlaceImages returns images from marketplace +func (s *ScalewayAPI) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%s", uuidImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketImages + + if uuidImage != "" { + ret.Images = make([]MarketImage, 1) + + var img MarketImage + + if err = json.Unmarshal(body, &img); err != nil { + return nil, err + } + ret.Images[0] = img + } else { + if err = json.Unmarshal(body, &ret); err != 
nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageVersions returns image version +func (s *ScalewayAPI) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s", uuidImage, uuidVersion), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketVersions + + if uuidImage != "" { + var version MarketVersion + ret.Versions = make([]MarketVersionDefinition, 1) + + if err = json.Unmarshal(body, &version); err != nil { + return nil, err + } + ret.Versions[0] = version.Version + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageCurrentVersion return the image current version +func (s *ScalewayAPI) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/current", uuidImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret MarketVersion + + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + return &ret, nil +} + +// GetMarketPlaceLocalImages returns images from local region +func (s *ScalewayAPI) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) { + resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%s", uuidImage, uuidVersion, uuidLocalImage), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var ret 
MarketLocalImages + if uuidLocalImage != "" { + var localImage MarketLocalImage + ret.LocalImages = make([]MarketLocalImageDefinition, 1) + + if err = json.Unmarshal(body, &localImage); err != nil { + return nil, err + } + ret.LocalImages[0] = localImage.LocalImages + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// PostMarketPlaceImage adds new image +func (s *ScalewayAPI) PostMarketPlaceImage(images MarketImage) error { + resp, err := s.PostResponse(MarketplaceAPI, "images/", images) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PostMarketPlaceImageVersion adds new image version +func (s *ScalewayAPI) PostMarketPlaceImageVersion(uuidImage string, version MarketVersion) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions", uuidImage), version) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PostMarketPlaceLocalImage adds new local image +func (s *ScalewayAPI) PostMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) + return err +} + +// PutMarketPlaceImage updates image +func (s *ScalewayAPI) PutMarketPlaceImage(uudiImage string, images MarketImage) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudiImage), images) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutMarketPlaceImageVersion updates image version +func (s *ScalewayAPI) 
PutMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion), version) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// PutMarketPlaceLocalImage updates local image +func (s *ScalewayAPI) PutMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusOK}, resp) + return err +} + +// DeleteMarketPlaceImage deletes image +func (s *ScalewayAPI) DeleteMarketPlaceImage(uudImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudImage)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// DeleteMarketPlaceImageVersion delete image version +func (s *ScalewayAPI) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// DeleteMarketPlaceLocalImage deletes local image +func (s *ScalewayAPI) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage)) + if err != nil { + return err + } + defer resp.Body.Close() + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// ResolveTTYUrl 
return an URL to get a tty +func (s *ScalewayAPI) ResolveTTYUrl() string { + switch s.Region { + case "par1", "": + return "https://tty-par1.scaleway.com/v2/" + case "ams1": + return "https://tty-ams1.scaleway.com" + } + return "" +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go new file mode 100644 index 000000000..1d9771a0e --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go @@ -0,0 +1,814 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +package api + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/moul/anonuuid" + "github.com/renstrom/fuzzysearch/fuzzy" +) + +const ( + // CacheRegion permits to access at the region field + CacheRegion = iota + // CacheArch permits to access at the arch field + CacheArch + // CacheOwner permits to access at the owner field + CacheOwner + // CacheTitle permits to access at the title field + CacheTitle + // CacheMarketPlaceUUID is used to determine the UUID of local images + CacheMarketPlaceUUID + // CacheMaxfield is used to determine the size of array + CacheMaxfield +) + +// ScalewayCache is used not to query the API to resolve full identifiers +type ScalewayCache struct { + // Images contains names of Scaleway images indexed by identifier + Images map[string][CacheMaxfield]string `json:"images"` + + // Snapshots contains names of Scaleway snapshots indexed by identifier + Snapshots map[string][CacheMaxfield]string `json:"snapshots"` + + // Volumes contains names of Scaleway volumes indexed by identifier + Volumes map[string][CacheMaxfield]string `json:"volumes"` + + // Bootscripts contains names of Scaleway bootscripts indexed by identifier + Bootscripts map[string][CacheMaxfield]string `json:"bootscripts"` + + // Servers contains 
names of Scaleway servers indexed by identifier + Servers map[string][CacheMaxfield]string `json:"servers"` + + // Path is the path to the cache file + Path string `json:"-"` + + // Modified tells if the cache needs to be overwritten or not + Modified bool `json:"-"` + + // Lock allows ScalewayCache to be used concurrently + Lock sync.Mutex `json:"-"` + + hookSave func() +} + +const ( + // IdentifierUnknown is used when we don't know explicitly the type key of the object (used for nil comparison) + IdentifierUnknown = 1 << iota + // IdentifierServer is the type key of cached server objects + IdentifierServer + // IdentifierImage is the type key of cached image objects + IdentifierImage + // IdentifierSnapshot is the type key of cached snapshot objects + IdentifierSnapshot + // IdentifierBootscript is the type key of cached bootscript objects + IdentifierBootscript + // IdentifierVolume is the type key of cached volume objects + IdentifierVolume +) + +// ScalewayResolverResult is a structure containing human-readable information +// about resolver results. This structure is used to display the user choices. 
+type ScalewayResolverResult struct { + Identifier string + Type int + Name string + Arch string + Needle string + RankMatch int + Region string +} + +// ScalewayResolverResults is a list of `ScalewayResolverResult` +type ScalewayResolverResults []ScalewayResolverResult + +// NewScalewayResolverResult returns a new ScalewayResolverResult +func NewScalewayResolverResult(Identifier, Name, Arch, Region string, Type int) (ScalewayResolverResult, error) { + if err := anonuuid.IsUUID(Identifier); err != nil { + return ScalewayResolverResult{}, err + } + return ScalewayResolverResult{ + Identifier: Identifier, + Type: Type, + Name: Name, + Arch: Arch, + Region: Region, + }, nil +} + +func (s ScalewayResolverResults) Len() int { + return len(s) +} + +func (s ScalewayResolverResults) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ScalewayResolverResults) Less(i, j int) bool { + return s[i].RankMatch < s[j].RankMatch +} + +// TruncIdentifier returns first 8 characters of an Identifier (UUID) +func (s *ScalewayResolverResult) TruncIdentifier() string { + return s.Identifier[:8] +} + +func identifierTypeName(kind int) string { + switch kind { + case IdentifierServer: + return "Server" + case IdentifierImage: + return "Image" + case IdentifierSnapshot: + return "Snapshot" + case IdentifierVolume: + return "Volume" + case IdentifierBootscript: + return "Bootscript" + } + return "" +} + +// CodeName returns a full resource name with typed prefix +func (s *ScalewayResolverResult) CodeName() string { + name := strings.ToLower(s.Name) + name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-") + name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-") + name = strings.Trim(name, "-") + + return fmt.Sprintf("%s:%s", strings.ToLower(identifierTypeName(s.Type)), name) +} + +// FilterByArch deletes the elements which not match with arch +func (s *ScalewayResolverResults) FilterByArch(arch string) { +REDO: + for i := range *s { + if (*s)[i].Arch != arch { + 
(*s)[i] = (*s)[len(*s)-1] + *s = (*s)[:len(*s)-1] + goto REDO + } + } +} + +// NewScalewayCache loads a per-user cache +func NewScalewayCache(hookSave func()) (*ScalewayCache, error) { + var cache ScalewayCache + + cache.hookSave = hookSave + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + homeDir = "/tmp" + } + cachePath := filepath.Join(homeDir, ".scw-cache.db") + cache.Path = cachePath + _, err := os.Stat(cachePath) + if os.IsNotExist(err) { + cache.Clear() + return &cache, nil + } else if err != nil { + return nil, err + } + file, err := ioutil.ReadFile(cachePath) + if err != nil { + return nil, err + } + err = json.Unmarshal(file, &cache) + if err != nil { + // fix compatibility with older version + if err = os.Remove(cachePath); err != nil { + return nil, err + } + cache.Clear() + return &cache, nil + } + if cache.Images == nil { + cache.Images = make(map[string][CacheMaxfield]string) + } + if cache.Snapshots == nil { + cache.Snapshots = make(map[string][CacheMaxfield]string) + } + if cache.Volumes == nil { + cache.Volumes = make(map[string][CacheMaxfield]string) + } + if cache.Servers == nil { + cache.Servers = make(map[string][CacheMaxfield]string) + } + if cache.Bootscripts == nil { + cache.Bootscripts = make(map[string][CacheMaxfield]string) + } + return &cache, nil +} + +// Clear removes all information from the cache +func (c *ScalewayCache) Clear() { + c.Images = make(map[string][CacheMaxfield]string) + c.Snapshots = make(map[string][CacheMaxfield]string) + c.Volumes = make(map[string][CacheMaxfield]string) + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// Flush flushes the cache database +func (c *ScalewayCache) Flush() error { + return os.Remove(c.Path) +} + +// Save atomically overwrites the current cache database +func (c *ScalewayCache) Save() error { + c.Lock.Lock() + defer 
c.Lock.Unlock() + + c.hookSave() + if c.Modified { + file, err := ioutil.TempFile(filepath.Dir(c.Path), filepath.Base(c.Path)) + if err != nil { + return err + } + defer file.Close() + if err := json.NewEncoder(file).Encode(c); err != nil { + os.Remove(file.Name()) + return err + } + + if err := os.Rename(file.Name(), c.Path); err != nil { + os.Remove(file.Name()) + return err + } + } + return nil +} + +// ComputeRankMatch fills `ScalewayResolverResult.RankMatch` with its `fuzzy` score +func (s *ScalewayResolverResult) ComputeRankMatch(needle string) { + s.Needle = needle + s.RankMatch = fuzzy.RankMatch(needle, s.Name) +} + +// LookUpImages attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Images[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + // FIXME: if 'user/' is in needle, only watch for a user image + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Images { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := 
NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } else if strings.HasPrefix(fields[CacheMarketPlaceUUID], needle) || nameRegex.MatchString(fields[CacheMarketPlaceUUID]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpSnapshots attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Snapshots[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Snapshots { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || 
nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpVolumes attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Volumes[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Volumes { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// 
LookUpBootscripts attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Bootscripts[needle]; ok { + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Bootscripts { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// LookUpServers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) (ScalewayResolverResults, error) { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + var exactMatches ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Servers[needle]; ok { + entry, 
err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + for identifier, fields := range c.Servers { + if fields[CacheTitle] == needle { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches, nil + } + + return removeDuplicatesResults(res), nil +} + +// removeDuplicatesResults transforms an array into a unique array +func removeDuplicatesResults(elements ScalewayResolverResults) ScalewayResolverResults { + encountered := map[string]ScalewayResolverResult{} + + // Create a map of all unique elements. + for v := range elements { + encountered[elements[v].Identifier] = elements[v] + } + + // Place all keys from the map into a slice. 
+ results := ScalewayResolverResults{} + for _, result := range encountered { + results = append(results, result) + } + return results +} + +// parseNeedle parses a user needle and try to extract a forced object type +// i.e: +// - server:blah-blah -> kind=server, needle=blah-blah +// - blah-blah -> kind="", needle=blah-blah +// - not-existing-type:blah-blah +func parseNeedle(input string) (identifierType int, needle string) { + parts := strings.Split(input, ":") + if len(parts) == 2 { + switch parts[0] { + case "server": + return IdentifierServer, parts[1] + case "image": + return IdentifierImage, parts[1] + case "snapshot": + return IdentifierSnapshot, parts[1] + case "bootscript": + return IdentifierBootscript, parts[1] + case "volume": + return IdentifierVolume, parts[1] + } + } + return IdentifierUnknown, input +} + +// LookUpIdentifiers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpIdentifiers(needle string) (ScalewayResolverResults, error) { + results := ScalewayResolverResults{} + + identifierType, needle := parseNeedle(needle) + + if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { + servers, err := c.LookUpServers(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range servers { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { + images, err := c.LookUpImages(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range images { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = 
append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { + snapshots, err := c.LookUpSnapshots(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range snapshots { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { + volumes, err := c.LookUpVolumes(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range volumes { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { + bootscripts, err := c.LookUpBootscripts(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range bootscripts { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + return results, nil +} + +// InsertServer registers a server in the cache +func (c *ScalewayCache) InsertServer(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Servers[identifier] + if !exists || fields[CacheTitle] != name { + c.Servers[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveServer removes a server from the cache +func (c *ScalewayCache) RemoveServer(identifier string) { + c.Lock.Lock() + defer 
c.Lock.Unlock() + + delete(c.Servers, identifier) + c.Modified = true +} + +// ClearServers removes all servers from the cache +func (c *ScalewayCache) ClearServers() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertImage registers an image in the cache +func (c *ScalewayCache) InsertImage(identifier, region, arch, owner, name, marketPlaceUUID string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Images[identifier] + if !exists || fields[CacheTitle] != name { + c.Images[identifier] = [CacheMaxfield]string{region, arch, owner, name, marketPlaceUUID} + c.Modified = true + } +} + +// RemoveImage removes a server from the cache +func (c *ScalewayCache) RemoveImage(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Images, identifier) + c.Modified = true +} + +// ClearImages removes all images from the cache +func (c *ScalewayCache) ClearImages() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Images = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertSnapshot registers an snapshot in the cache +func (c *ScalewayCache) InsertSnapshot(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Snapshots[identifier] + if !exists || fields[CacheTitle] != name { + c.Snapshots[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveSnapshot removes a server from the cache +func (c *ScalewayCache) RemoveSnapshot(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Snapshots, identifier) + c.Modified = true +} + +// ClearSnapshots removes all snapshots from the cache +func (c *ScalewayCache) ClearSnapshots() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Snapshots = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertVolume registers an volume in the cache +func (c *ScalewayCache) 
InsertVolume(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Volumes[identifier] + if !exists || fields[CacheTitle] != name { + c.Volumes[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveVolume removes a server from the cache +func (c *ScalewayCache) RemoveVolume(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Volumes, identifier) + c.Modified = true +} + +// ClearVolumes removes all volumes from the cache +func (c *ScalewayCache) ClearVolumes() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Volumes = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertBootscript registers an bootscript in the cache +func (c *ScalewayCache) InsertBootscript(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Bootscripts[identifier] + if !exists || fields[CacheTitle] != name { + c.Bootscripts[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveBootscript removes a bootscript from the cache +func (c *ScalewayCache) RemoveBootscript(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Bootscripts, identifier) + c.Modified = true +} + +// ClearBootscripts removes all bootscripts from the cache +func (c *ScalewayCache) ClearBootscripts() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// GetNbServers returns the number of servers in the cache +func (c *ScalewayCache) GetNbServers() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Servers) +} + +// GetNbImages returns the number of images in the cache +func (c *ScalewayCache) GetNbImages() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Images) +} + +// GetNbSnapshots returns the number of snapshots in the cache +func (c *ScalewayCache) 
GetNbSnapshots() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Snapshots) +} + +// GetNbVolumes returns the number of volumes in the cache +func (c *ScalewayCache) GetNbVolumes() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Volumes) +} + +// GetNbBootscripts returns the number of bootscripts in the cache +func (c *ScalewayCache) GetNbBootscripts() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Bootscripts) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go new file mode 100644 index 000000000..3a59e465d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go @@ -0,0 +1,685 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +package api + +import ( + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/dustin/go-humanize" + "github.com/moul/anonuuid" + "github.com/scaleway/scaleway-cli/pkg/utils" +) + +// ScalewayResolvedIdentifier represents a list of matching identifier for a specifier pattern +type ScalewayResolvedIdentifier struct { + // Identifiers holds matching identifiers + Identifiers ScalewayResolverResults + + // Needle is the criteria used to lookup identifiers + Needle string +} + +// ScalewayImageInterface is an interface to multiple Scaleway items +type ScalewayImageInterface struct { + CreationDate time.Time + Identifier string + Name string + Tag string + VirtualSize uint64 + Public bool + Type string + Organization string + Archs []string + Region []string +} + +// ResolveGateway tries to resolve a server public ip address, else returns the input string, i.e. 
IPv4, hostname +func ResolveGateway(api *ScalewayAPI, gateway string) (string, error) { + if gateway == "" { + return "", nil + } + + // Parses optional type prefix, i.e: "server:name" -> "name" + _, gateway = parseNeedle(gateway) + + servers, err := api.ResolveServer(gateway) + if err != nil { + return "", err + } + + if len(servers) == 0 { + return gateway, nil + } + + if len(servers) > 1 { + return "", showResolverResults(gateway, servers) + } + + // if len(servers) == 1 { + server, err := api.GetServer(servers[0].Identifier) + if err != nil { + return "", err + } + return server.PublicAddress.IP, nil +} + +// CreateVolumeFromHumanSize creates a volume on the API with a human readable size +func CreateVolumeFromHumanSize(api *ScalewayAPI, size string) (*string, error) { + bytes, err := humanize.ParseBytes(size) + if err != nil { + return nil, err + } + + var newVolume ScalewayVolumeDefinition + newVolume.Name = size + newVolume.Size = bytes + newVolume.Type = "l_ssd" + + volumeID, err := api.PostVolume(newVolume) + if err != nil { + return nil, err + } + + return &volumeID, nil +} + +// fillIdentifierCache fills the cache by fetching from the API +func fillIdentifierCache(api *ScalewayAPI, identifierType int) { + log.Debugf("Filling the cache") + var wg sync.WaitGroup + wg.Add(5) + go func() { + if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { + api.GetServers(true, 0) + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { + api.GetImages() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { + api.GetSnapshots() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { + api.GetVolumes() + } + wg.Done() + }() + go func() { + if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { + api.GetBootscripts() + } + wg.Done() + }() + wg.Wait() +} + +// GetIdentifier returns a an identifier if the resolved needles only 
match one element, else, it exists the program +func GetIdentifier(api *ScalewayAPI, needle string) (*ScalewayResolverResult, error) { + idents, err := ResolveIdentifier(api, needle) + if err != nil { + return nil, err + } + + if len(idents) == 1 { + return &idents[0], nil + } + if len(idents) == 0 { + return nil, fmt.Errorf("No such identifier: %s", needle) + } + + sort.Sort(idents) + for _, identifier := range idents { + // FIXME: also print the name + fmt.Fprintf(os.Stderr, "- %s\n", identifier.Identifier) + } + return nil, fmt.Errorf("Too many candidates for %s (%d)", needle, len(idents)) +} + +// ResolveIdentifier resolves needle provided by the user +func ResolveIdentifier(api *ScalewayAPI, needle string) (ScalewayResolverResults, error) { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + return idents, err + } + if len(idents) > 0 { + return idents, nil + } + + identifierType, _ := parseNeedle(needle) + fillIdentifierCache(api, identifierType) + + return api.Cache.LookUpIdentifiers(needle) +} + +// ResolveIdentifiers resolves needles provided by the user +func ResolveIdentifiers(api *ScalewayAPI, needles []string, out chan ScalewayResolvedIdentifier) { + // first attempt, only lookup from the cache + var unresolved []string + for _, needle := range needles { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + api.Logger.Fatalf("%s", err) + } + if len(idents) == 0 { + unresolved = append(unresolved, needle) + } else { + out <- ScalewayResolvedIdentifier{ + Identifiers: idents, + Needle: needle, + } + } + } + // fill the cache by fetching from the API and resolve missing identifiers + if len(unresolved) > 0 { + // compute identifierType: + // if identifierType is the same for every unresolved needle, + // we use it directly, else, we choose IdentifierUnknown to + // fulfill every types of cache + identifierType, _ := parseNeedle(unresolved[0]) + for _, needle := range unresolved { + newIdentifierType, _ := 
parseNeedle(needle) + if identifierType != newIdentifierType { + identifierType = IdentifierUnknown + break + } + } + + // fill all the cache + fillIdentifierCache(api, identifierType) + + // lookup again in the cache + for _, needle := range unresolved { + idents, err := api.Cache.LookUpIdentifiers(needle) + if err != nil { + api.Logger.Fatalf("%s", err) + } + out <- ScalewayResolvedIdentifier{ + Identifiers: idents, + Needle: needle, + } + } + } + + close(out) +} + +// InspectIdentifierResult is returned by `InspectIdentifiers` and contains the inspected `Object` with its `Type` +type InspectIdentifierResult struct { + Type int + Object interface{} +} + +// InspectIdentifiers inspects identifiers concurrently +func InspectIdentifiers(api *ScalewayAPI, ci chan ScalewayResolvedIdentifier, cj chan InspectIdentifierResult, arch string) { + var wg sync.WaitGroup + for { + idents, ok := <-ci + if !ok { + break + } + idents.Identifiers = FilterImagesByArch(idents.Identifiers, arch) + idents.Identifiers = FilterImagesByRegion(idents.Identifiers, api.Region) + if len(idents.Identifiers) != 1 { + if len(idents.Identifiers) == 0 { + log.Errorf("Unable to resolve identifier %s", idents.Needle) + } else { + logrus.Fatal(showResolverResults(idents.Needle, idents.Identifiers)) + } + } else { + ident := idents.Identifiers[0] + wg.Add(1) + go func() { + var obj interface{} + var err error + + switch ident.Type { + case IdentifierServer: + obj, err = api.GetServer(ident.Identifier) + case IdentifierImage: + obj, err = api.GetImage(ident.Identifier) + case IdentifierSnapshot: + obj, err = api.GetSnapshot(ident.Identifier) + case IdentifierVolume: + obj, err = api.GetVolume(ident.Identifier) + case IdentifierBootscript: + obj, err = api.GetBootscript(ident.Identifier) + } + if err == nil && obj != nil { + cj <- InspectIdentifierResult{ + Type: ident.Type, + Object: obj, + } + } + wg.Done() + }() + } + } + wg.Wait() + close(cj) +} + +// ConfigCreateServer represents the options sent 
to CreateServer and defining a server +type ConfigCreateServer struct { + ImageName string + Name string + Bootscript string + Env string + AdditionalVolumes string + IP string + CommercialType string + DynamicIPRequired bool + EnableIPV6 bool +} + +// CreateServer creates a server using API based on typical server fields +func CreateServer(api *ScalewayAPI, c *ConfigCreateServer) (string, error) { + commercialType := os.Getenv("SCW_COMMERCIAL_TYPE") + if commercialType == "" { + commercialType = c.CommercialType + } + if len(commercialType) < 2 { + return "", errors.New("Invalid commercial type") + } + + if c.Name == "" { + c.Name = strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1) + } + + var server ScalewayServerDefinition + + server.CommercialType = commercialType + server.Volumes = make(map[string]string) + server.DynamicIPRequired = &c.DynamicIPRequired + server.EnableIPV6 = c.EnableIPV6 + if commercialType == "" { + return "", errors.New("You need to specify a commercial-type") + } + if c.IP != "" { + if anonuuid.IsUUID(c.IP) == nil { + server.PublicIP = c.IP + } else { + ips, err := api.GetIPS() + if err != nil { + return "", err + } + for _, ip := range ips.IPS { + if ip.Address == c.IP { + server.PublicIP = ip.ID + break + } + } + if server.PublicIP == "" { + return "", fmt.Errorf("IP address %v not found", c.IP) + } + } + } + server.Tags = []string{} + if c.Env != "" { + server.Tags = strings.Split(c.Env, " ") + } + switch c.CommercialType { + case "VC1M", "X64-4GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "50G" + log.Debugf("This server needs a least 50G") + } + case "VC1L", "X64-8GB", "X64-15GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "150G" + log.Debugf("This server needs a least 150G") + } + case "X64-30GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "100G 150G" + log.Debugf("This server needs a least 300G") + } + case "X64-60GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = 
"50G 150G 150G" + log.Debugf("This server needs a least 400G") + } + case "X64-120GB": + if c.AdditionalVolumes == "" { + c.AdditionalVolumes = "150G 150G 150G" + log.Debugf("This server needs a least 500G") + } + } + if c.AdditionalVolumes != "" { + volumes := strings.Split(c.AdditionalVolumes, " ") + for i := range volumes { + volumeID, err := CreateVolumeFromHumanSize(api, volumes[i]) + if err != nil { + return "", err + } + + volumeIDx := fmt.Sprintf("%d", i+1) + server.Volumes[volumeIDx] = *volumeID + } + } + arch := os.Getenv("SCW_TARGET_ARCH") + if arch == "" { + server.CommercialType = strings.ToUpper(server.CommercialType) + switch server.CommercialType[:2] { + case "C1": + arch = "arm" + case "C2", "VC", "X6": + arch = "x86_64" + default: + return "", fmt.Errorf("%s wrong commercial type", server.CommercialType) + } + } + imageIdentifier := &ScalewayImageIdentifier{ + Arch: arch, + } + server.Name = c.Name + inheritingVolume := false + _, err := humanize.ParseBytes(c.ImageName) + if err == nil { + // Create a new root volume + volumeID, errCreateVol := CreateVolumeFromHumanSize(api, c.ImageName) + if errCreateVol != nil { + return "", errCreateVol + } + server.Volumes["0"] = *volumeID + } else { + // Use an existing image + inheritingVolume = true + if anonuuid.IsUUID(c.ImageName) == nil { + server.Image = &c.ImageName + } else { + imageIdentifier, err = api.GetImageID(c.ImageName, arch) + if err != nil { + return "", err + } + if imageIdentifier.Identifier != "" { + server.Image = &imageIdentifier.Identifier + } else { + snapshotID, errGetSnapID := api.GetSnapshotID(c.ImageName) + if errGetSnapID != nil { + return "", errGetSnapID + } + snapshot, errGetSnap := api.GetSnapshot(snapshotID) + if errGetSnap != nil { + return "", errGetSnap + } + if snapshot.BaseVolume.Identifier == "" { + return "", fmt.Errorf("snapshot %v does not have base volume", snapshot.Name) + } + server.Volumes["0"] = snapshot.BaseVolume.Identifier + } + } + } + + if c.Bootscript != 
"" { + bootscript := "" + + if anonuuid.IsUUID(c.Bootscript) == nil { + bootscript = c.Bootscript + } else { + var errGetBootScript error + + bootscript, errGetBootScript = api.GetBootscriptID(c.Bootscript, imageIdentifier.Arch) + if errGetBootScript != nil { + return "", errGetBootScript + } + } + server.Bootscript = &bootscript + } + serverID, err := api.PostServer(server) + if err != nil { + return "", err + } + + // For inherited volumes, we prefix the name with server hostname + if inheritingVolume { + createdServer, err := api.GetServer(serverID) + if err != nil { + return "", err + } + currentVolume := createdServer.Volumes["0"] + + var volumePayload ScalewayVolumePutDefinition + newName := fmt.Sprintf("%s-%s", createdServer.Hostname, currentVolume.Name) + volumePayload.Name = &newName + volumePayload.CreationDate = ¤tVolume.CreationDate + volumePayload.Organization = ¤tVolume.Organization + volumePayload.Server.Identifier = ¤tVolume.Server.Identifier + volumePayload.Server.Name = ¤tVolume.Server.Name + volumePayload.Identifier = ¤tVolume.Identifier + volumePayload.Size = ¤tVolume.Size + volumePayload.ModificationDate = ¤tVolume.ModificationDate + volumePayload.ExportURI = ¤tVolume.ExportURI + volumePayload.VolumeType = ¤tVolume.VolumeType + + err = api.PutVolume(currentVolume.Identifier, volumePayload) + if err != nil { + return "", err + } + } + + return serverID, nil +} + +// WaitForServerState asks API in a loop until a server matches a wanted state +func WaitForServerState(api *ScalewayAPI, serverID string, targetState string) (*ScalewayServer, error) { + var server *ScalewayServer + var err error + + var currentState string + + for { + server, err = api.GetServer(serverID) + if err != nil { + return nil, err + } + if currentState != server.State { + log.Infof("Server changed state to '%s'", server.State) + currentState = server.State + } + if server.State == targetState { + break + } + time.Sleep(1 * time.Second) + } + + return server, nil +} + +// 
WaitForServerReady wait for a server state to be running, then wait for the SSH port to be available +func WaitForServerReady(api *ScalewayAPI, serverID, gateway string) (*ScalewayServer, error) { + promise := make(chan bool) + var server *ScalewayServer + var err error + var currentState string + + go func() { + defer close(promise) + + for { + server, err = api.GetServer(serverID) + if err != nil { + promise <- false + return + } + if currentState != server.State { + log.Infof("Server changed state to '%s'", server.State) + currentState = server.State + } + if server.State == "running" { + break + } + if server.State == "stopped" { + err = fmt.Errorf("The server has been stopped") + promise <- false + return + } + time.Sleep(1 * time.Second) + } + + if gateway == "" { + dest := fmt.Sprintf("%s:22", server.PublicAddress.IP) + log.Debugf("Waiting for server SSH port %s", dest) + err = utils.WaitForTCPPortOpen(dest) + if err != nil { + promise <- false + return + } + } else { + dest := fmt.Sprintf("%s:22", gateway) + log.Debugf("Waiting for server SSH port %s", dest) + err = utils.WaitForTCPPortOpen(dest) + if err != nil { + promise <- false + return + } + log.Debugf("Check for SSH port through the gateway: %s", server.PrivateIP) + timeout := time.Tick(120 * time.Second) + for { + select { + case <-timeout: + err = fmt.Errorf("Timeout: unable to ping %s", server.PrivateIP) + goto OUT + default: + if utils.SSHExec("", server.PrivateIP, "root", 22, []string{ + "nc", + "-z", + "-w", + "1", + server.PrivateIP, + "22", + }, false, gateway, false) == nil { + goto OUT + } + time.Sleep(2 * time.Second) + } + } + OUT: + if err != nil { + logrus.Info(err) + err = nil + } + } + promise <- true + }() + + loop := 0 + for { + select { + case done := <-promise: + utils.LogQuiet("\r \r") + if !done { + return nil, err + } + return server, nil + case <-time.After(time.Millisecond * 100): + utils.LogQuiet(fmt.Sprintf("\r%c\r", "-\\|/"[loop%4])) + loop = loop + 1 + if loop == 5 { + 
loop = 0 + } + } + } +} + +// WaitForServerStopped wait for a server state to be stopped +func WaitForServerStopped(api *ScalewayAPI, serverID string) (*ScalewayServer, error) { + server, err := WaitForServerState(api, serverID, "stopped") + if err != nil { + return nil, err + } + return server, nil +} + +// ByCreationDate sorts images by CreationDate field +type ByCreationDate []ScalewayImageInterface + +func (a ByCreationDate) Len() int { return len(a) } +func (a ByCreationDate) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByCreationDate) Less(i, j int) bool { return a[j].CreationDate.Before(a[i].CreationDate) } + +// StartServer start a server based on its needle, can optionaly block while server is booting +func StartServer(api *ScalewayAPI, needle string, wait bool) error { + server, err := api.GetServerID(needle) + if err != nil { + return err + } + + if err = api.PostServerAction(server, "poweron"); err != nil { + return err + } + + if wait { + _, err = WaitForServerReady(api, server, "") + if err != nil { + return fmt.Errorf("failed to wait for server %s to be ready, %v", needle, err) + } + } + return nil +} + +// StartServerOnce wraps StartServer for golang channel +func StartServerOnce(api *ScalewayAPI, needle string, wait bool, successChan chan string, errChan chan error) { + err := StartServer(api, needle, wait) + + if err != nil { + errChan <- err + return + } + successChan <- needle +} + +// DeleteServerForce tries to delete a server using multiple ways +func (a *ScalewayAPI) DeleteServerForce(serverID string) error { + // FIXME: also delete attached volumes and ip address + // FIXME: call delete and stop -t in parallel to speed up process + err := a.DeleteServer(serverID) + if err == nil { + logrus.Infof("Server '%s' successfully deleted", serverID) + return nil + } + + err = a.PostServerAction(serverID, "terminate") + if err == nil { + logrus.Infof("Server '%s' successfully terminated", serverID) + return nil + } + + // FIXME: retry in a loop 
until timeout or Control+C + logrus.Errorf("Failed to delete server %s", serverID) + logrus.Errorf("Try to run 'scw rm -f %s' later", serverID) + return err +} + +// GetSSHFingerprintFromServer returns an array which containts ssh-host-fingerprints +func (a *ScalewayAPI) GetSSHFingerprintFromServer(serverID string) []string { + ret := []string{} + + if value, err := a.GetUserdata(serverID, "ssh-host-fingerprints", false); err == nil { + PublicKeys := strings.Split(string(*value), "\n") + for i := range PublicKeys { + if fingerprint, err := utils.SSHGetFingerprint([]byte(PublicKeys[i])); err == nil { + ret = append(ret, fingerprint) + } + } + } + return ret +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go new file mode 100644 index 000000000..58ad93716 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go @@ -0,0 +1,77 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +package api + +import ( + "fmt" + "log" + "net/http" + "os" +) + +// Logger handles logging concerns for the Scaleway API SDK +type Logger interface { + LogHTTP(*http.Request) + Fatalf(format string, v ...interface{}) + Debugf(format string, v ...interface{}) + Infof(format string, v ...interface{}) + Warnf(format string, v ...interface{}) +} + +// NewDefaultLogger returns a logger which is configured for stdout +func NewDefaultLogger() Logger { + return &defaultLogger{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +type defaultLogger struct { + *log.Logger +} + +func (l *defaultLogger) LogHTTP(r *http.Request) { + l.Printf("%s %s\n", r.Method, r.URL.RawPath) +} + +func (l *defaultLogger) Fatalf(format string, v ...interface{}) { + l.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) + os.Exit(1) +} + +func (l *defaultLogger) Debugf(format string, v ...interface{}) { + l.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) +} + +func (l *defaultLogger) Infof(format string, v ...interface{}) { + l.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) +} + +func (l *defaultLogger) Warnf(format string, v ...interface{}) { + l.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) +} + +type disableLogger struct { +} + +// NewDisableLogger returns a logger which is configured to do nothing +func NewDisableLogger() Logger { + return &disableLogger{} +} + +func (d *disableLogger) LogHTTP(r *http.Request) { +} + +func (d *disableLogger) Fatalf(format string, v ...interface{}) { + panic(fmt.Sprintf(format, v)) +} + +func (d *disableLogger) Debugf(format string, v ...interface{}) { +} + +func (d *disableLogger) Infof(format string, v ...interface{}) { +} + +func (d *disableLogger) Warnf(format string, v ...interface{}) { +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go b/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go new file mode 100644 index 000000000..676b2f976 --- /dev/null +++ 
b/vendor/github.com/scaleway/scaleway-cli/pkg/sshcommand/sshcommand.go @@ -0,0 +1,124 @@ +package sshcommand + +import ( + "fmt" + "runtime" + "strings" +) + +// Command contains settings to build a ssh command +type Command struct { + Host string + User string + Port int + SSHOptions []string + Gateway *Command + Command []string + Debug bool + NoEscapeCommand bool + SkipHostKeyChecking bool + Quiet bool + AllocateTTY bool + EnableSSHKeyForwarding bool + + isGateway bool +} + +// New returns a minimal Command +func New(host string) *Command { + return &Command{ + Host: host, + } +} + +func (c *Command) applyDefaults() { + if strings.Contains(c.Host, "@") { + parts := strings.Split(c.Host, "@") + c.User = parts[0] + c.Host = parts[1] + } + + if c.Port == 0 { + c.Port = 22 + } + + if c.isGateway { + c.SSHOptions = []string{"-W", "%h:%p"} + } +} + +// Slice returns an execve compatible slice of arguments +func (c *Command) Slice() []string { + c.applyDefaults() + + slice := []string{} + + slice = append(slice, "ssh") + + if c.EnableSSHKeyForwarding { + slice = append(slice, "-A") + } + + if c.Quiet { + slice = append(slice, "-q") + } + + if c.SkipHostKeyChecking { + slice = append(slice, "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no") + } + + if len(c.SSHOptions) > 0 { + slice = append(slice, c.SSHOptions...) 
+ } + + if c.Gateway != nil { + c.Gateway.isGateway = true + slice = append(slice, "-o", "ProxyCommand="+c.Gateway.String()) + } + + if c.User != "" { + slice = append(slice, "-l", c.User) + } + + slice = append(slice, c.Host) + + if c.AllocateTTY { + slice = append(slice, "-t", "-t") + } + + slice = append(slice, "-p", fmt.Sprintf("%d", c.Port)) + if len(c.Command) > 0 { + slice = append(slice, "--", "/bin/sh", "-e") + if c.Debug { + slice = append(slice, "-x") + } + slice = append(slice, "-c") + + var escapedCommand []string + if c.NoEscapeCommand { + escapedCommand = c.Command + } else { + escapedCommand = []string{} + for _, part := range c.Command { + escapedCommand = append(escapedCommand, fmt.Sprintf("%q", part)) + } + } + slice = append(slice, fmt.Sprintf("%q", strings.Join(escapedCommand, " "))) + } + if runtime.GOOS == "windows" { + slice[len(slice)-1] = slice[len(slice)-1] + " " // Why ? + } + return slice +} + +// String returns a copy-pasteable command, useful for debugging +func (c *Command) String() string { + slice := c.Slice() + for i := range slice { + quoted := fmt.Sprintf("%q", slice[i]) + if strings.Contains(slice[i], " ") || len(quoted) != len(slice[i])+2 { + slice[i] = quoted + } + } + return strings.Join(slice, " ") +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go new file mode 100644 index 000000000..775918d8d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/quiet.go @@ -0,0 +1,30 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +// Package utils contains logquiet +package utils + +import ( + "fmt" + "os" +) + +// LogQuietStruct is a struct to store information about quiet state +type LogQuietStruct struct { + quiet bool +} + +var instanceQuiet LogQuietStruct + +// Quiet enable or disable quiet +func Quiet(option bool) { + instanceQuiet.quiet = option +} + +// LogQuiet Displays info if quiet is activated +func LogQuiet(str string) { + if !instanceQuiet.quiet { + fmt.Fprintf(os.Stderr, "%s", str) + } +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go new file mode 100644 index 000000000..6f11f4869 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/utils/utils.go @@ -0,0 +1,253 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. + +// scw helpers + +// Package utils contains helpers +package utils + +import ( + "crypto/md5" + "errors" + "fmt" + "io" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "strings" + "time" + + "golang.org/x/crypto/ssh" + + "github.com/Sirupsen/logrus" + log "github.com/Sirupsen/logrus" + "github.com/mattn/go-isatty" + "github.com/moul/gotty-client" + "github.com/scaleway/scaleway-cli/pkg/sshcommand" +) + +// SpawnRedirection is used to redirects the fluxes +type SpawnRedirection struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// SSHExec executes a command over SSH and redirects file-descriptors +func SSHExec(publicIPAddress, privateIPAddress, user string, port int, command []string, checkConnection bool, gateway string, enableSSHKeyForwarding bool) error { + gatewayUser := "root" + gatewayIPAddress := gateway + if strings.Contains(gateway, "@") { + parts := strings.Split(gatewayIPAddress, "@") + if len(parts) != 2 { + return fmt.Errorf("gateway: must be like root@IP") + } + gatewayUser = parts[0] + 
gatewayIPAddress = parts[1] + gateway = gatewayUser + "@" + gatewayIPAddress + } + + if publicIPAddress == "" && gatewayIPAddress == "" { + return errors.New("server does not have public IP") + } + if privateIPAddress == "" && gatewayIPAddress != "" { + return errors.New("server does not have private IP") + } + + if checkConnection { + useGateway := gatewayIPAddress != "" + if useGateway && !IsTCPPortOpen(fmt.Sprintf("%s:22", gatewayIPAddress)) { + return errors.New("gateway is not available, try again later") + } + if !useGateway && !IsTCPPortOpen(fmt.Sprintf("%s:%d", publicIPAddress, port)) { + return errors.New("server is not ready, try again later") + } + } + + sshCommand := NewSSHExecCmd(publicIPAddress, privateIPAddress, user, port, isatty.IsTerminal(os.Stdin.Fd()), command, gateway, enableSSHKeyForwarding) + + log.Debugf("Executing: %s", sshCommand) + + spawn := exec.Command("ssh", sshCommand.Slice()[1:]...) + spawn.Stdout = os.Stdout + spawn.Stdin = os.Stdin + spawn.Stderr = os.Stderr + return spawn.Run() +} + +// NewSSHExecCmd computes execve compatible arguments to run a command via ssh +func NewSSHExecCmd(publicIPAddress, privateIPAddress, user string, port int, allocateTTY bool, command []string, gatewayIPAddress string, enableSSHKeyForwarding bool) *sshcommand.Command { + quiet := os.Getenv("DEBUG") != "1" + secureExec := os.Getenv("SCW_SECURE_EXEC") == "1" + sshCommand := &sshcommand.Command{ + AllocateTTY: allocateTTY, + Command: command, + Host: publicIPAddress, + Quiet: quiet, + SkipHostKeyChecking: !secureExec, + User: user, + NoEscapeCommand: true, + Port: port, + EnableSSHKeyForwarding: enableSSHKeyForwarding, + } + if gatewayIPAddress != "" { + sshCommand.Host = privateIPAddress + sshCommand.Gateway = &sshcommand.Command{ + Host: gatewayIPAddress, + SkipHostKeyChecking: !secureExec, + AllocateTTY: allocateTTY, + Quiet: quiet, + User: user, + Port: port, + } + } + + return sshCommand +} + +// GeneratingAnSSHKey generates an SSH key +func 
GeneratingAnSSHKey(cfg SpawnRedirection, path string, name string) (string, error) { + args := []string{ + "-t", + "rsa", + "-b", + "4096", + "-f", + filepath.Join(path, name), + "-N", + "", + "-C", + "", + } + log.Infof("Executing commands %v", args) + spawn := exec.Command("ssh-keygen", args...) + spawn.Stdout = cfg.Stdout + spawn.Stdin = cfg.Stdin + spawn.Stderr = cfg.Stderr + return args[5], spawn.Run() +} + +// WaitForTCPPortOpen calls IsTCPPortOpen in a loop +func WaitForTCPPortOpen(dest string) error { + for { + if IsTCPPortOpen(dest) { + break + } + time.Sleep(1 * time.Second) + } + return nil +} + +// IsTCPPortOpen returns true if a TCP communication with "host:port" can be initialized +func IsTCPPortOpen(dest string) bool { + conn, err := net.DialTimeout("tcp", dest, time.Duration(2000)*time.Millisecond) + if err == nil { + defer conn.Close() + } + return err == nil +} + +// TruncIf ensures the input string does not exceed max size if cond is met +func TruncIf(str string, max int, cond bool) string { + if cond && len(str) > max { + return str[:max] + } + return str +} + +// Wordify convert complex name to a single word without special shell characters +func Wordify(str string) string { + str = regexp.MustCompile(`[^a-zA-Z0-9-]`).ReplaceAllString(str, "_") + str = regexp.MustCompile(`__+`).ReplaceAllString(str, "_") + str = strings.Trim(str, "_") + return str +} + +// PathToTARPathparts returns the two parts of a unix path +func PathToTARPathparts(fullPath string) (string, string) { + fullPath = strings.TrimRight(fullPath, "/") + return path.Dir(fullPath), path.Base(fullPath) +} + +// RemoveDuplicates transforms an array into a unique array +func RemoveDuplicates(elements []string) []string { + encountered := map[string]bool{} + + // Create a map of all unique elements. + for v := range elements { + encountered[elements[v]] = true + } + + // Place all keys from the map into a slice. 
+ result := []string{} + for key := range encountered { + result = append(result, key) + } + return result +} + +// AttachToSerial tries to connect to server serial using 'gotty-client' and fallback with a help message +func AttachToSerial(serverID, apiToken, url string) (*gottyclient.Client, chan bool, error) { + gottyURL := os.Getenv("SCW_GOTTY_URL") + if gottyURL == "" { + gottyURL = url + } + URL := fmt.Sprintf("%s?arg=%s&arg=%s", gottyURL, apiToken, serverID) + + logrus.Debug("Connection to ", URL) + gottycli, err := gottyclient.NewClient(URL) + if err != nil { + return nil, nil, err + } + + if os.Getenv("SCW_TLSVERIFY") == "0" { + gottycli.SkipTLSVerify = true + } + + gottycli.UseProxyFromEnv = true + + if err = gottycli.Connect(); err != nil { + return nil, nil, err + } + done := make(chan bool) + + fmt.Println("You are connected, type 'Ctrl+q' to quit.") + go func() { + gottycli.Loop() + gottycli.Close() + done <- true + }() + return gottycli, done, nil +} + +func rfc4716hex(data []byte) string { + fingerprint := "" + + for i := 0; i < len(data); i++ { + fingerprint = fmt.Sprintf("%s%0.2x", fingerprint, data[i]) + if i != len(data)-1 { + fingerprint = fingerprint + ":" + } + } + return fingerprint +} + +// SSHGetFingerprint returns the fingerprint of an SSH key +func SSHGetFingerprint(key []byte) (string, error) { + publicKey, comment, _, _, err := ssh.ParseAuthorizedKey(key) + if err != nil { + return "", err + } + switch reflect.TypeOf(publicKey).String() { + case "*ssh.rsaPublicKey", "*ssh.dsaPublicKey", "*ssh.ecdsaPublicKey": + md5sum := md5.Sum(publicKey.Marshal()) + return publicKey.Type() + " " + rfc4716hex(md5sum[:]) + " " + comment, nil + default: + return "", errors.New("Can't handle this key") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 000000000..18379a935 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 
+1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. 
+ lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). 
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && 
bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. 
+ // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. +func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. 
+func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. 
+ for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. +func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. 
+func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n += 1 + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. 
+ t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. + if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. 
+ // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. 
+// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. +func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 000000000..d01919614 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,119 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "syscall" + "unsafe" +) + +// State contains the state of a terminal. 
+type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. 
+func Restore(fd int, state *State) error { + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 { + return err + } + return nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 000000000..9c1ffd145 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 000000000..5883b22d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. 
+const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 000000000..799f049f0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. 
+func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 000000000..07eb5edd7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + // see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c + var termio unix.Termio + err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 000000000..e0a1f36ce --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// passwordReader is an io.Reader that reads from a specific Windows HANDLE. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(syscall.Handle(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 000000000..533438d91 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. 
+func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} From 4c7a467e9e7ac5a69754a6d616cd5a0c98ce0f13 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 11:53:05 -0800 Subject: [PATCH 229/251] format scaleway docs --- website/source/docs/builders/scaleway.html.md | 59 ++++++++++--------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index efa26dbee..b1864bdb0 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -4,10 +4,10 @@ sidebar_current: docs-builders-scaleway page_title: Scaleway - Builders description: |- The Scaleway Packer builder is able to create new images for use with - Scaleway BareMetal and Virtual cloud server. The builder takes a source image, runs any provisioning - necessary on the image after launching it, then snapshots it into a reusable - image. This reusable image can then be used as the foundation of new servers - that are launched within Scaleway. + Scaleway BareMetal and Virtual cloud server. The builder takes a source + image, runs any provisioning necessary on the image after launching it, then + snapshots it into a reusable image. This reusable image can then be used as + the foundation of new servers that are launched within Scaleway. --- @@ -36,36 +36,40 @@ builder. ### Required: -- `api_access_key` (string) - The api_access_key to use to access your account. 
- It can also be specified via - environment variable `SCALEWAY_API_ACCESS_KEY`. - Your access key is available in the ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the control panel. +- `api_access_key` (string) - The api\_access\_key to use to access your + account. It can also be specified via environment variable + `SCALEWAY_API_ACCESS_KEY`. Your access key is available in the + ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the + control panel. -- `api_token` (string) - The organization TOKEN to use to access your account. - It can also be specified via - environment variable `SCALEWAY_API_TOKEN`. - Your tokens are available in the ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the control panel. +- `api_token` (string) - The organization TOKEN to use to access your + account. It can also be specified via environment variable + `SCALEWAY_API_TOKEN`. Your tokens are available in the ["Credentials" + section](https://cloud.scaleway.com/#/credentials) of the control panel. -- `image` (string) - The UUID of the base image to use. This is the - image that will be used to launch a new server and provision it. See - [https://api-marketplace.scaleway.com/images](https://api-marketplace.scaleway.com/images) - get the complete list of the accepted image UUID. +- `image` (string) - The UUID of the base image to use. This is the image + that will be used to launch a new server and provision it. See + get the complete list of the + accepted image UUID. -- `region` (string) - The name of the region to launch the - server in (`par1` or `ams1`). Consequently, this is the region where the snapshot will - be available. +- `region` (string) - The name of the region to launch the server in (`par1` + or `ams1`). Consequently, this is the region where the snapshot will be + available. 
-- `commercial_type` (string) - The name of the server commercial type: `C1`, `C2S`, `C2M`, - `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`, `X64-30GB`, `X64-60GB`, `X64-120GB`, `ARM64-2GB`, `ARM64-4GB`, `ARM64-8GB`, `ARM64-16GB`, `ARM64-32GB`, `ARM64-64GB`, `ARM64-128GB` +- `commercial_type` (string) - The name of the server commercial type: `C1`, + `C2S`, `C2M`, `C2L`, `X64-2GB`, `X64-4GB`, `X64-8GB`, `X64-15GB`, + `X64-30GB`, `X64-60GB`, `X64-120GB`, `ARM64-2GB`, `ARM64-4GB`, `ARM64-8GB`, + `ARM64-16GB`, `ARM64-32GB`, `ARM64-64GB`, `ARM64-128GB` ### Optional: -- `server_name` (string) - The name assigned to the server. Default `packer-UUID` +- `server_name` (string) - The name assigned to the server. Default + `packer-UUID` -- `image_name` (string) - The name of the resulting image that will - appear in your account. Default `packer-TIMESTAMP` +- `image_name` (string) - The name of the resulting image that will appear in + your account. Default `packer-TIMESTAMP` -- `snapshot_name` (string) - The name of the resulting snapshot that will +- `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. Default `packer-TIMESTAMP` ## Basic Example @@ -86,5 +90,6 @@ access tokens: } ``` -When you do not specified the `ssh_private_key_file`, a temporarily SSH keypair is generated to connect the server. -This key will only allow the `root` user to connect the server. +When you do not specified the `ssh_private_key_file`, a temporarily SSH keypair +is generated to connect the server. This key will only allow the `root` user to +connect the server. 
From e5ab2062769b8d8232ed622f3c774d560af07647 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 12:33:51 -0800 Subject: [PATCH 230/251] add our 3 new builders to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8c3105e8..ada8d8e5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,9 @@ ### IMPROVEMENTS: +* **New builder:** `scaleway` - The Scaleway Packer builder is able to create new images for use with Scaleway BareMetal and Virtual cloud server. [GH-4770] +* **New builder:** `ncloud` for building server images using the NAVER Cloud Platform. [GH-5791] +* **New builder:** `oci-classic` for building new custom images for use with Oracle Cloud Infrastructure Classic Compute. [GH-5819] * builder/docker: Remove credentials from being shown in the log. [GH-5666] * builder/triton: Triton RBAC is now supported. [GH-5741] * provisioner/ansible: Improve user retrieval. [GH-5758] From ad2e5f1f08c97058d135cb19c0ee484fc6c29399 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 8 Feb 2018 12:52:44 -0800 Subject: [PATCH 231/251] fail in oracle classic builder if user tries winrm since it doesn't work yet, and add attributes and attributes_file fields to oracle builder --- builder/oracle/classic/config.go | 16 ++++++++++ .../oracle/classic/step_create_instance.go | 30 +++++++++++++++++++ builder/oracle/classic/step_list_images.go | 30 ++++++++++++++++--- .../docs/builders/oracle-classic.html.md | 13 +++++++- 4 files changed, 84 insertions(+), 5 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 4cd7c42a7..52a08cbe9 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -3,6 +3,7 @@ package classic import ( "fmt" "net/url" + "os" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" @@ -27,6 +28,9 @@ type Config struct { Shape string `mapstructure:"shape"` SourceImageList 
string `mapstructure:"source_image_list"` DestImageList string `mapstructure:"dest_image_list"` + // Attributes and Atributes file are both optional and mutually exclusive. + Attributes string `mapstructure:"attributes"` + AttributesFile string `mapstructure:"attributes_file"` // Optional; if you don't enter anything, the image list description // will read "Packer-built image list" DestImageListDescription string `mapstructure:"image_description"` @@ -81,9 +85,21 @@ func NewConfig(raws ...interface{}) (*Config, error) { } } + if c.Attributes != "" && c.AttributesFile != "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified.")) + } else if c.AttributesFile != "" { + if _, err := os.Stat(c.AttributesFile); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("attributes_file not found: %s", c.AttributesFile)) + } + } + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } + if c.Comm.Type == "winrm" { + err = fmt.Errorf("winRM is not supported with the oracle-classic builder yet.") + errs = packer.MultiErrorAppend(errs, err) + } if errs != nil && len(errs.Errors) > 0 { return nil, errs diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index 041889ab1..cf4b043fe 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -2,7 +2,9 @@ package classic import ( "context" + "encoding/json" "fmt" + "io/ioutil" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/helper/multistep" @@ -30,6 +32,33 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu // get instances client instanceClient := client.Instances() + var data map[string]interface{} + + if config.Attributes != "" { + err := json.Unmarshal([]byte(config.Attributes), &data) + if err != nil { + err = fmt.Errorf("Problem parsing json from 
attributes: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + } else if config.AttributesFile != "" { + fidata, err := ioutil.ReadFile(config.AttributesFile) + if err != nil { + err = fmt.Errorf("Problem reading attributes_file: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + err = json.Unmarshal(fidata, &data) + if err != nil { + err = fmt.Errorf("Problem parsing json from attrinutes_file: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + } + // Instances Input input := &compute.CreateInstanceInput{ Name: config.ImageName, @@ -37,6 +66,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu ImageList: config.SourceImageList, SSHKeys: []string{keyName}, Networking: map[string]compute.NetworkingInfo{"eth0": netInfo}, + Attributes: data, } instanceInfo, err := instanceClient.CreateInstance(input) diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index a99612c14..247b96146 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -11,11 +11,10 @@ import ( type stepListImages struct{} -func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { - // get variables from state - ui := state.Get("ui").(packer.Ui) - config := state.Get("config").(*Config) +func (s *stepListImages) listForSSH(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*compute.ComputeClient) + config := state.Get("config").(*Config) + ui := state.Get("ui").(packer.Ui) ui.Say("Adding image to image list...") imageListClient := client.ImageList() @@ -98,6 +97,29 @@ func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multis return multistep.ActionContinue } +func (s *stepListImages) listForWinRM(state multistep.StateBag) multistep.StepAction { + // 
This is a placeholder function; we will never reach this because we already + // return an error when winRM is set when validating the Packer config. + ui := state.Get("ui").(packer.Ui) + err := fmt.Errorf("The Oracle Classic builder does not currently support winRM.") + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt +} + +func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + // get variables from state + config := state.Get("config").(*Config) + + var action multistep.StepAction + if config.Comm.Type == "winrm" { + action = s.listForWinRM(state) + } else if config.Comm.Type == "ssh" { + action = s.listForSSH(state) + } + return action +} + func (s *stepListImages) Cleanup(state multistep.StateBag) { // Nothing to do return diff --git a/website/source/docs/builders/oracle-classic.html.md b/website/source/docs/builders/oracle-classic.html.md index f8991b968..98db5f2aa 100644 --- a/website/source/docs/builders/oracle-classic.html.md +++ b/website/source/docs/builders/oracle-classic.html.md @@ -63,6 +63,16 @@ This builder currently only works with the SSH communicator. ### Optional + - `attributes` (string) - (string) - Attributes to apply when launching the + instance. Note that you need to be careful about escaping characters due to + the templates being JSON. It is often more convenient to use + `attributes_file`, instead. You may only define either `attributes` or + `attributes_file`, not both. + + - `attributes_file` (string) - Path to a json file that will be used for the + attributes when launching the instance. You may only define either + `attributes` or `attributes_file`, not both. + - `image_description` (string) - a description for your destination image list. If you don't provide one, Packer will provide a generic description. 
@@ -91,7 +101,8 @@ obfuscated; you will need to add a working `username`, `password`, "api_endpoint": "https://api-###.compute.###.oraclecloud.com/", "source_image_list": "/oracle/public/OL_7.2_UEKR4_x86_64", "shape": "oc3", - "image_name": "Packer_Builder_Test_{{timestamp}}" + "image_name": "Packer_Builder_Test_{{timestamp}}", + "attributes": "{\"userdata\": {\"pre-bootstrap\": {\"script\": [\"...\"]}}}", "dest_image_list": "Packer_Builder_Test_List" } ], From ff717c57844bdfb2a934f1a41f7b538596070a2e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 8 Feb 2018 13:21:21 -0800 Subject: [PATCH 232/251] wrong place for differentiation between ssh and winrm --- builder/oracle/classic/step_list_images.go | 30 +++------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/builder/oracle/classic/step_list_images.go b/builder/oracle/classic/step_list_images.go index 247b96146..a99612c14 100644 --- a/builder/oracle/classic/step_list_images.go +++ b/builder/oracle/classic/step_list_images.go @@ -11,10 +11,11 @@ import ( type stepListImages struct{} -func (s *stepListImages) listForSSH(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(*compute.ComputeClient) - config := state.Get("config").(*Config) +func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + // get variables from state ui := state.Get("ui").(packer.Ui) + config := state.Get("config").(*Config) + client := state.Get("client").(*compute.ComputeClient) ui.Say("Adding image to image list...") imageListClient := client.ImageList() @@ -97,29 +98,6 @@ func (s *stepListImages) listForSSH(state multistep.StateBag) multistep.StepActi return multistep.ActionContinue } -func (s *stepListImages) listForWinRM(state multistep.StateBag) multistep.StepAction { - // This is a placeholder function; we will never reach this because we already - // return an error when winRM is set when validating the Packer config. 
- ui := state.Get("ui").(packer.Ui) - err := fmt.Errorf("The Oracle Classic builder does not currently support winRM.") - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt -} - -func (s *stepListImages) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { - // get variables from state - config := state.Get("config").(*Config) - - var action multistep.StepAction - if config.Comm.Type == "winrm" { - action = s.listForWinRM(state) - } else if config.Comm.Type == "ssh" { - action = s.listForSSH(state) - } - return action -} - func (s *stepListImages) Cleanup(state multistep.StateBag) { // Nothing to do return From 7f631fcb772616af9d8da80d66f4a4133acdaa7f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 8 Feb 2018 14:12:39 -0800 Subject: [PATCH 233/251] unpack attributes in oracle-classic builder earlier so that we error fast if there's an issue --- builder/oracle/classic/config.go | 28 +++++++++++++++++ .../oracle/classic/step_create_instance.go | 31 +------------------ 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/builder/oracle/classic/config.go b/builder/oracle/classic/config.go index 52a08cbe9..233a3a0d1 100644 --- a/builder/oracle/classic/config.go +++ b/builder/oracle/classic/config.go @@ -1,7 +1,9 @@ package classic import ( + "encoding/json" "fmt" + "io/ioutil" "net/url" "os" @@ -15,6 +17,7 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` + attribs map[string]interface{} // Access config overrides Username string `mapstructure:"username"` @@ -105,5 +108,30 @@ func NewConfig(raws ...interface{}) (*Config, error) { return nil, errs } + // unpack attributes from json into config + var data map[string]interface{} + + if c.Attributes != "" { + err := json.Unmarshal([]byte(c.Attributes), &data) + if err != nil { + err = fmt.Errorf("Problem parsing json from attributes: %s", err) + packer.MultiErrorAppend(errs, err) + } + 
c.attribs = data + } else if c.AttributesFile != "" { + fidata, err := ioutil.ReadFile(c.AttributesFile) + if err != nil { + err = fmt.Errorf("Problem reading attributes_file: %s", err) + packer.MultiErrorAppend(errs, err) + } + err = json.Unmarshal(fidata, &data) + c.attribs = data + if err != nil { + err = fmt.Errorf("Problem parsing json from attrinutes_file: %s", err) + packer.MultiErrorAppend(errs, err) + } + c.attribs = data + } + return c, nil } diff --git a/builder/oracle/classic/step_create_instance.go b/builder/oracle/classic/step_create_instance.go index cf4b043fe..cda67122c 100644 --- a/builder/oracle/classic/step_create_instance.go +++ b/builder/oracle/classic/step_create_instance.go @@ -2,9 +2,7 @@ package classic import ( "context" - "encoding/json" "fmt" - "io/ioutil" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/packer/helper/multistep" @@ -32,33 +30,6 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu // get instances client instanceClient := client.Instances() - var data map[string]interface{} - - if config.Attributes != "" { - err := json.Unmarshal([]byte(config.Attributes), &data) - if err != nil { - err = fmt.Errorf("Problem parsing json from attributes: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt - } - } else if config.AttributesFile != "" { - fidata, err := ioutil.ReadFile(config.AttributesFile) - if err != nil { - err = fmt.Errorf("Problem reading attributes_file: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt - } - err = json.Unmarshal(fidata, &data) - if err != nil { - err = fmt.Errorf("Problem parsing json from attrinutes_file: %s", err) - ui.Error(err.Error()) - state.Put("error", err) - return multistep.ActionHalt - } - } - // Instances Input input := &compute.CreateInstanceInput{ Name: config.ImageName, @@ -66,7 +37,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state 
multistep.StateBag) mu ImageList: config.SourceImageList, SSHKeys: []string{keyName}, Networking: map[string]compute.NetworkingInfo{"eth0": netInfo}, - Attributes: data, + Attributes: config.attribs, } instanceInfo, err := instanceClient.CreateInstance(input) From a9b48309c90ae67598af90b4398bbd16958a0abd Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 14:48:04 -0800 Subject: [PATCH 234/251] update amazon authentication docs Correct docs to reflect new behavior. This is largely borrowed from terraform. --- website/source/docs/builders/amazon.html.md | 105 ++++++++++++++------ 1 file changed, 73 insertions(+), 32 deletions(-) diff --git a/website/source/docs/builders/amazon.html.md b/website/source/docs/builders/amazon.html.md index e77e4bf79..7e7b763c9 100644 --- a/website/source/docs/builders/amazon.html.md +++ b/website/source/docs/builders/amazon.html.md @@ -49,49 +49,90 @@ filesystem and data. -## Specifying Amazon Credentials +## Authentication -When you use any of the amazon builders, you must provide credentials to the API -in the form of an access key id and secret. These look like: +The AWS provider offers a flexible means of providing credentials for +authentication. The following methods are supported, in this order, and +explained below: - access key id: AKIAIOSFODNN7EXAMPLE - secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +- Static credentials +- Environment variables +- Shared credentials file +- EC2 Role -If you use other AWS tools you may already have these configured. If so, packer -will try to use them, *unless* they are specified in your packer template. -Credentials are resolved in the following order: +### Static Credentials -1. Values hard-coded in the packer template are always authoritative. -2. *Variables* in the packer template may be resolved from command-line flags - or from environment variables. Please read about [User - Variables](https://www.packer.io/docs/templates/user-variables.html) - for details. 
-3. If no credentials are found, packer falls back to automatic lookup. +Static credentials can be provided in the form of an access key id and secret. +These look like: -### Automatic Lookup +```json +{ + "access_key": "AKIAIOSFODNN7EXAMPLE", + "secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "region": "us-east-1", + "type": "amazon-ebs" +} +``` -Packer depends on the [AWS -SDK](https://aws.amazon.com/documentation/sdk-for-go/) to perform automatic -lookup using *credential chains*. In short, the SDK looks for credentials in -the following order: +### Environment variables -1. Environment variables. -2. Shared credentials file. -3. If your application is running on an Amazon EC2 instance, IAM role for Amazon EC2. +You can provide your credentials via the `AWS_ACCESS_KEY_ID` and +`AWS_SECRET_ACCESS_KEY`, environment variables, representing your AWS Access +Key and AWS Secret Key, respectively. Note that setting your AWS credentials +using either these environment variables will override the use of +`AWS_SHARED_CREDENTIALS_FILE` and `AWS_PROFILE`. The `AWS_DEFAULT_REGION` and +`AWS_SESSION_TOKEN` environment variables are also used, if applicable: -Please refer to the SDK's documentation on [specifying -credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials) -for more information. -## Using an IAM Task or Instance Role +Usage: -If AWS keys are not specified in the template, a -[shared credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) -or through environment variables Packer will use credentials provided by -the task's or instance's IAM role, if it has one. 
+``` +$ export AWS_ACCESS_KEY_ID="anaccesskey" +$ export AWS_SECRET_ACCESS_KEY="asecretkey" +$ export AWS_DEFAULT_REGION="us-west-2" +$ packer build packer.json +``` -The following policy document provides the minimal set permissions necessary for -Packer to work: +### Shared Credentials file + +You can use an AWS credentials file to specify your credentials. The default +location is $HOME/.aws/credentials on Linux and OS X, or +"%USERPROFILE%.aws\credentials" for Windows users. If we fail to detect +credentials inline, or in the environment, Packer will check this location. You +can optionally specify a different location in the configuration by setting the +environment with the `AWS_SHARED_CREDENTIALS_FILE` variable. + +You may also configure the profile to use by setting the `profile` +configuration option, or setting the `AWS_PROFILE` environment variable: + +```json +{ + "profile": "customprofile", + "region": "us-east-1", + "type": "amazon-ebs" +} +``` + + +### IAM Task or Instance Role + +Finally, Packer will use credentials provided by the task's or instance's IAM +role, if it has one. + +This is a preferred approach over any other when running in EC2 as you can +avoid hard coding credentials. Instead these are leased on-the-fly by Packer, +which reduces the chance of leakage. + +The default deadline for the EC2 metadata API endpoint is 100 milliseconds, +which can be overidden by setting the `AWS_METADATA_TIMEOUT` environment +variable. The variable expects a positive golang Time.Duration string, which is +a sequence of decimal numbers and a unit suffix; valid suffixes are `ns` +(nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` +(minutes), and `h` (hours). Examples of valid inputs: `100ms`, `250ms`, `1s`, +`2.5s`, `2.5m`, `1m30s`. 
+ +The following policy document provides the minimal set permissions necessary +for Packer to work: ``` json { From c11627fba7a0e88a921e7510e5203af3a1596b63 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 14:52:51 -0800 Subject: [PATCH 235/251] document new skip_metadata_api_check option --- website/source/docs/builders/amazon-chroot.html.md | 6 ++++++ website/source/docs/builders/amazon-ebs.html.md | 6 ++++++ website/source/docs/builders/amazon-ebssurrogate.html.md | 6 ++++++ website/source/docs/builders/amazon-ebsvolume.html.md | 6 ++++++ website/source/docs/builders/amazon-instance.html.md | 6 ++++++ 5 files changed, 30 insertions(+) diff --git a/website/source/docs/builders/amazon-chroot.html.md b/website/source/docs/builders/amazon-chroot.html.md index a983305c2..1757b06bf 100644 --- a/website/source/docs/builders/amazon-chroot.html.md +++ b/website/source/docs/builders/amazon-chroot.html.md @@ -243,6 +243,12 @@ each category, the available configuration keys are alphabetized. of the `source_ami` unless `from_scratch` is `true`, in which case this field must be defined. +- `skip_metadata_api_check` - (boolean) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API + endpoint. Setting to `true` prevents Packer from authenticating via the + Metadata API. You may need to use other authentication methods like static + credentials, configuration variables, or environment variables. + - `skip_region_validation` (boolean) - Set to true if you want to skip validation of the `ami_regions` configuration option. Default `false`. diff --git a/website/source/docs/builders/amazon-ebs.html.md b/website/source/docs/builders/amazon-ebs.html.md index 6b48621b4..c83698569 100644 --- a/website/source/docs/builders/amazon-ebs.html.md +++ b/website/source/docs/builders/amazon-ebs.html.md @@ -244,6 +244,12 @@ builder. in case Packer exits ungracefully. Possible values are "stop" and "terminate", default is `stop`. 
+- `skip_metadata_api_check` - (boolean) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API + endpoint. Setting to `true` prevents Packer from authenticating via the + Metadata API. You may need to use other authentication methods like static + credentials, configuration variables, or environment variables. + - `skip_region_validation` (boolean) - Set to true if you want to skip validation of the region configuration option. Default `false`. diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md b/website/source/docs/builders/amazon-ebssurrogate.html.md index 7b995fa87..aeffe2f09 100644 --- a/website/source/docs/builders/amazon-ebssurrogate.html.md +++ b/website/source/docs/builders/amazon-ebssurrogate.html.md @@ -237,6 +237,12 @@ builder. incase packer exits ungracefully. Possible values are "stop" and "terminate", default is `stop`. +- `skip_metadata_api_check` - (boolean) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API + endpoint. Setting to `true` prevents Packer from authenticating via the + Metadata API. You may need to use other authentication methods like static + credentials, configuration variables, or environment variables. + - `skip_region_validation` (boolean) - Set to true if you want to skip validation of the region configuration option. Default `false`. diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md b/website/source/docs/builders/amazon-ebsvolume.html.md index b500aa2bd..174673ed0 100644 --- a/website/source/docs/builders/amazon-ebsvolume.html.md +++ b/website/source/docs/builders/amazon-ebsvolume.html.md @@ -156,6 +156,12 @@ builder. in case Packer exits ungracefully. Possible values are `stop` and `terminate`. Defaults to `stop`. +- `skip_metadata_api_check` - (boolean) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API + endpoint. 
Setting to `true` prevents Packer from authenticating via the + Metadata API. You may need to use other authentication methods like static + credentials, configuration variables, or environment variables. + - `skip_region_validation` (boolean) - Set to `true` if you want to skip validation of the region configuration option. Defaults to `false`. diff --git a/website/source/docs/builders/amazon-instance.html.md b/website/source/docs/builders/amazon-instance.html.md index f8f72708a..299222837 100644 --- a/website/source/docs/builders/amazon-instance.html.md +++ b/website/source/docs/builders/amazon-instance.html.md @@ -248,6 +248,12 @@ builder. The default is `0.0.0.0/0` (ie, allow any IPv4 source). This is only used when `security_group_id` or `security_group_ids` is not specified. +- `skip_metadata_api_check` - (boolean) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API + endpoint. Setting to `true` prevents Packer from authenticating via the + Metadata API. You may need to use other authentication methods like static + credentials, configuration variables, or environment variables. + - `skip_region_validation` (boolean) - Set to true if you want to skip validation of the region configuration option. Defaults to `false`. From 5af42ee9e285501c0ae8fdc1f2c2086ed1bf2fd1 Mon Sep 17 00:00:00 2001 From: SwampDragons Date: Thu, 8 Feb 2018 15:10:53 -0800 Subject: [PATCH 236/251] Revert "Add `winrm_no_proxy` option." 
--- helper/communicator/config.go | 9 +++---- helper/communicator/step_connect_test.go | 24 ----------------- helper/communicator/step_connect_winrm.go | 26 ------------------- .../docs/templates/communicator.html.md | 15 ++++------- 4 files changed, 9 insertions(+), 65 deletions(-) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 52b81a3a6..f2664b192 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -41,15 +41,14 @@ type Config struct { SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"` // WinRM - WinRMHost string `mapstructure:"winrm_host"` - WinRMInsecure bool `mapstructure:"winrm_insecure"` - WinRMNoProxy bool `mapstructure:"winrm_no_proxy"` + WinRMUser string `mapstructure:"winrm_username"` WinRMPassword string `mapstructure:"winrm_password"` + WinRMHost string `mapstructure:"winrm_host"` WinRMPort int `mapstructure:"winrm_port"` WinRMTimeout time.Duration `mapstructure:"winrm_timeout"` - WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"` WinRMUseSSL bool `mapstructure:"winrm_use_ssl"` - WinRMUser string `mapstructure:"winrm_username"` + WinRMInsecure bool `mapstructure:"winrm_insecure"` + WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"` WinRMTransportDecorator func() winrm.Transporter } diff --git a/helper/communicator/step_connect_test.go b/helper/communicator/step_connect_test.go index 6db32fba5..fb61f6463 100644 --- a/helper/communicator/step_connect_test.go +++ b/helper/communicator/step_connect_test.go @@ -3,12 +3,10 @@ package communicator import ( "bytes" "context" - "os" "testing" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "github.com/stretchr/testify/assert" ) func TestStepConnect_impl(t *testing.T) { @@ -31,28 +29,6 @@ func TestStepConnect_none(t *testing.T) { } } -var noProxyTests = []struct { - current string - expected string -}{ - {"", "foo:1"}, - {"foo:1", "foo:1"}, - {"foo:1,bar:2", "foo:1,bar:2"}, - {"bar:2", 
"bar:2,foo:1"}, -} - -func TestStepConnect_setNoProxy(t *testing.T) { - key := "NO_PROXY" - for _, tt := range noProxyTests { - if value := os.Getenv(key); value != "" { - os.Unsetenv(key) - defer func() { os.Setenv(key, value) }() - os.Setenv(key, tt.current) - assert.Equal(t, tt.expected, os.Getenv(key), "env not set correctly.") - } - } -} - func testState(t *testing.T) multistep.StateBag { state := new(multistep.BasicStateBag) state.Put("hook", &packer.MockHook{}) diff --git a/helper/communicator/step_connect_winrm.go b/helper/communicator/step_connect_winrm.go index cdda73a5a..06e6236f8 100644 --- a/helper/communicator/step_connect_winrm.go +++ b/helper/communicator/step_connect_winrm.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "log" - "os" "strings" "time" @@ -129,12 +128,6 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan } } - if s.Config.WinRMNoProxy { - if err := setNoProxy(host, port); err != nil { - return nil, fmt.Errorf("Error setting no_proxy: %s", err) - } - } - log.Println("[INFO] Attempting WinRM connection...") comm, err = winrm.New(&winrm.Config{ Host: host, @@ -189,22 +182,3 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan return comm, nil } - -// setNoProxy configures the $NO_PROXY env var -func setNoProxy(host string, port int) error { - current := os.Getenv("NO_PROXY") - p := fmt.Sprintf("%s:%d", host, port) - // not set - // set - // is set and not contains - // set - // is set and contains - if current == "" { - return os.Setenv("NO_PROXY", p) - } - if !strings.Contains(current, p) { - return os.Setenv("NO_PROXY", strings.Join([]string{current, p}, ",")) - } - return nil - -} diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index db768a90a..0165ab5a1 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -139,21 +139,16 @@ The WinRM 
communicator has the following options. - `winrm_password` (string) - The password to use to connect to WinRM. -- `winrm_insecure` (boolean) - If true, do not check server certificate - chain and host name - -* `winrm_no_proxy` (boolean) - Setting this to `true` adds the remote - `host:post` to the `NO_PROXY` environment variable. This has the effect of - bypassing any configured proxies when connecting to the remote host. - Default to `false`. - - `winrm_timeout` (string) - The amount of time to wait for WinRM to become available. This defaults to "30m" since setting up a Windows machine generally takes a long time. +- `winrm_use_ssl` (boolean) - If true, use HTTPS for WinRM + +- `winrm_insecure` (boolean) - If true, do not check server certificate + chain and host name + - `winrm_use_ntlm` (boolean) - If true, NTLM authentication will be used for WinRM, rather than default (basic authentication), removing the requirement for basic authentication to be enabled within the target guest. Further reading for remote connection authentication can be found [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx). - -- `winrm_use_ssl` (boolean) - If true, use HTTPS for WinRM From 49f9d318377591bbba2de56395e6b8fa94020903 Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 8 Feb 2018 23:15:15 +0000 Subject: [PATCH 237/251] Update docs to explain new auto escape of chars special to PowerShell --- .../docs/provisioners/powershell.html.md | 100 +++++++++++++++++- 1 file changed, 99 insertions(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/powershell.html.md b/website/source/docs/provisioners/powershell.html.md index ae897489f..a9c218638 100644 --- a/website/source/docs/provisioners/powershell.html.md +++ b/website/source/docs/provisioners/powershell.html.md @@ -56,7 +56,7 @@ Optional parameters: files, and Packer should therefore not convert Windows line endings to Unix line endings (if there are any). By default this is false. 
-- `elevated_execute_command` (string) - The command to use to execute the +- `elevated_execute_command` (string) - The command to use to execute the elevated script. By default this is as follows: ``` powershell @@ -125,3 +125,101 @@ commonly useful environmental variables: download large files over http. This may be useful if you're experiencing slower speeds using the default file provisioner. A file provisioner using the `winrm` communicator may experience these types of difficulties. + +## Packer's Handling of Characters Special to PowerShell + +The escape character in PowerShell is the `backtick`, also sometimes +referred to as the `grave accent`. When, and when not, to escape characters +special to PowerShell is probably best demonstrated with a series of examples. + +### When To Escape... + +Users need to deal with escaping characters special to PowerShell when they +appear *directly* in commands used in the `inline` PowerShell provisioner and +when they appear *directly* in the users own scripts. +Note that where double quotes appear within double quotes, the addition of +a backslash escape is required for the JSON template to be parsed correctly. + +``` json + "provisioners": [ + { + "type": "powershell", + "inline": [ + "Write-Host \"A literal dollar `$ must be escaped\"", + "Write-Host \"A literal backtick `` must be escaped\"", + "Write-Host \"Here `\"double quotes`\" must be escaped\"", + "Write-Host \"Here `'single quotes`' don`'t really need to be\"", + "Write-Host \"escaped... but it doesn`'t hurt to do so.\"", + ] + }, +``` + +The above snippet should result in the following output on the Packer console: + +``` +==> amazon-ebs: Provisioning with Powershell... 
+==> amazon-ebs: Provisioning with powershell script: /var/folders/15/d0f7gdg13rnd1cxp7tgmr55c0000gn/T/packer-powershell-provisioner508190439 + amazon-ebs: A literal dollar $ must be escaped + amazon-ebs: A literal backtick ` must be escaped + amazon-ebs: Here "double quotes" must be escaped + amazon-ebs: Here 'single quotes' don't really need to be + amazon-ebs: escaped... but it doesn't hurt to do so. +``` + +### When Not To Escape... + +Special characters appearing in user environment variable values and in the +`elevated_user` and `elevated_password` fields will be automatically +dealt with for the user. There is no need to use escapes in these instances. + +``` json +{ + "variables": { + "psvar": "My$tring" + }, + ... + "provisioners": [ + { + "type": "powershell", + "elevated_user": "Administrator", + "elevated_password": "Super$3cr3t!", + "inline": "Write-Output \"The dollar in the elevated_password is interpreted correctly\"" + }, + { + "type": "powershell", + "environment_vars": [ + "VAR1=A$Dollar", + "VAR2=A`Backtick", + "VAR3=A'SingleQuote", + "VAR4=A\"DoubleQuote", + "VAR5={{user `psvar`}}" + ], + "inline": [ + "Write-Output \"In the following examples the special character is interpreted correctly:\"", + "Write-Output \"The dollar in VAR1: $Env:VAR1\"", + "Write-Output \"The backtick in VAR2: $Env:VAR2\"", + "Write-Output \"The single quote in VAR3: $Env:VAR3\"", + "Write-Output \"The double quote in VAR4: $Env:VAR4\"", + "Write-Output \"The dollar in VAR5 (expanded from a user var): $Env:VAR5\"" + ] + } + ] + ... +} +``` + +The above snippet should result in the following output on the Packer console: + +``` +==> amazon-ebs: Provisioning with Powershell... +==> amazon-ebs: Provisioning with powershell script: /var/folders/15/d0f7gdg13rnd1cxp7tgmr55c0000gn/T/packer-powershell-provisioner961728919 + amazon-ebs: The dollar in the elevated_password is interpreted correctly +==> amazon-ebs: Provisioning with Powershell... 
+==> amazon-ebs: Provisioning with powershell script: /var/folders/15/d0f7gdg13rnd1cxp7tgmr55c0000gn/T/packer-powershell-provisioner142826554 + amazon-ebs: In the following examples the special character is interpreted correctly: + amazon-ebs: The dollar in VAR1: A$Dollar + amazon-ebs: The backtick in VAR2: A`Backtick + amazon-ebs: The single quote in VAR3: A'SingleQuote + amazon-ebs: The double quote in VAR4: A"DoubleQuote + amazon-ebs: The dollar in VAR5 (expanded from a user var): My$tring +``` From 19a89a101ec0a38fec88885eba4c0ba880a3bedc Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 8 Feb 2018 16:47:17 -0800 Subject: [PATCH 238/251] builder/amazon: remove ssh_private_ip ssh_private_ip should now be set through ssh_interface. Adds fixer to automatically fix existing json files --- builder/amazon/common/run_config.go | 9 --- fix/fixer.go | 2 + fix/fixer_amazon_enhanced_networking.go | 15 +++++ fix/fixer_amazon_enhanced_networking_test.go | 8 +-- fix/fixer_amazon_private_ip.go | 70 ++++++++++++++++++++ fix/fixer_amazon_private_ip_test.go | 64 ++++++++++++++++++ fix/fixer_amazon_shutdown_behavior.go | 4 +- 7 files changed, 158 insertions(+), 14 deletions(-) create mode 100644 fix/fixer_amazon_private_ip.go create mode 100644 fix/fixer_amazon_private_ip_test.go diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 1b50b4842..289a461b9 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -53,7 +53,6 @@ type RunConfig struct { // Communicator settings Comm communicator.Config `mapstructure:",squash"` SSHKeyPairName string `mapstructure:"ssh_keypair_name"` - SSHPrivateIp bool `mapstructure:"ssh_private_ip"` SSHInterface string `mapstructure:"ssh_interface"` } @@ -78,14 +77,6 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { // Validation errs := c.Comm.Prepare(ctx) - if c.SSHPrivateIp && c.SSHInterface != "" { - errs = append(errs, errors.New("ssh_interface and 
ssh_private_ip should not both be specified")) - } - - // Legacy configurable - if c.SSHPrivateIp { - c.SSHInterface = "private_ip" - } // Valadating ssh_interface if c.SSHInterface != "public_ip" && diff --git a/fix/fixer.go b/fix/fixer.go index 5ca0b3a18..9f6247ee9 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -33,6 +33,7 @@ func init() { "manifest-filename": new(FixerManifestFilename), "amazon-shutdown_behavior": new(FixerAmazonShutdownBehavior), "amazon-enhanced-networking": new(FixerAmazonEnhancedNetworking), + "amazon-private-ip": new(FixerAmazonPrivateIP), "docker-email": new(FixerDockerEmail), } @@ -50,6 +51,7 @@ func init() { "manifest-filename", "amazon-shutdown_behavior", "amazon-enhanced-networking", + "amazon-private-ip", "docker-email", } } diff --git a/fix/fixer_amazon_enhanced_networking.go b/fix/fixer_amazon_enhanced_networking.go index 4c9330ebe..7188da7f7 100644 --- a/fix/fixer_amazon_enhanced_networking.go +++ b/fix/fixer_amazon_enhanced_networking.go @@ -1,6 +1,8 @@ package fix import ( + "strings" + "github.com/mitchellh/mapstructure" ) @@ -22,6 +24,19 @@ func (FixerAmazonEnhancedNetworking) Fix(input map[string]interface{}) (map[stri // Go through each builder and replace the enhanced_networking if we can for _, builder := range tpl.Builders { + builderTypeRaw, ok := builder["type"] + if !ok { + continue + } + + builderType, ok := builderTypeRaw.(string) + if !ok { + continue + } + + if !strings.HasPrefix(builderType, "amazon-") { + continue + } enhancedNetworkingRaw, ok := builder["enhanced_networking"] if !ok { continue diff --git a/fix/fixer_amazon_enhanced_networking_test.go b/fix/fixer_amazon_enhanced_networking_test.go index f8b5be178..78b58d582 100644 --- a/fix/fixer_amazon_enhanced_networking_test.go +++ b/fix/fixer_amazon_enhanced_networking_test.go @@ -17,12 +17,12 @@ func TestFixerAmazonEnhancedNetworking(t *testing.T) { // Attach field == false { Input: map[string]interface{}{ - "type": "ebs", + "type": "amazon-ebs", 
"enhanced_networking": false, }, Expected: map[string]interface{}{ - "type": "ebs", + "type": "amazon-ebs", "ena_support": false, }, }, @@ -30,12 +30,12 @@ func TestFixerAmazonEnhancedNetworking(t *testing.T) { // Attach field == true { Input: map[string]interface{}{ - "type": "ebs", + "type": "amazon-ebs", "enhanced_networking": true, }, Expected: map[string]interface{}{ - "type": "ebs", + "type": "amazon-ebs", "ena_support": true, }, }, diff --git a/fix/fixer_amazon_private_ip.go b/fix/fixer_amazon_private_ip.go new file mode 100644 index 000000000..7bfce1291 --- /dev/null +++ b/fix/fixer_amazon_private_ip.go @@ -0,0 +1,70 @@ +package fix + +import ( + "log" + "strings" + + "github.com/mitchellh/mapstructure" +) + +// FixerAmazonPrivateIP is a Fixer that replaces instances of `"private_ip": +// true` with `"ssh_interface": "private_ip"` +type FixerAmazonPrivateIP struct{} + +func (FixerAmazonPrivateIP) Fix(input map[string]interface{}) (map[string]interface{}, error) { + type template struct { + Builders []map[string]interface{} + } + + // Decode the input into our structure, if we can + var tpl template + if err := mapstructure.Decode(input, &tpl); err != nil { + return nil, err + } + + // Go through each builder and replace the enhanced_networking if we can + for _, builder := range tpl.Builders { + builderTypeRaw, ok := builder["type"] + if !ok { + continue + } + + builderType, ok := builderTypeRaw.(string) + if !ok { + continue + } + + if !strings.HasPrefix(builderType, "amazon-") { + continue + } + + // if ssh_interface already set, do nothing + if _, ok := builder["ssh_interface"]; ok { + continue + } + + privateIPi, ok := builder["ssh_private_ip"] + if !ok { + continue + } + privateIP, ok := privateIPi.(bool) + if !ok { + log.Fatalf("Wrong type for ssh_private_ip") + continue + } + + delete(builder, "ssh_private_ip") + if privateIP { + builder["ssh_interface"] = "private_ip" + } else { + builder["ssh_interface"] = "public_ip" + } + } + + input["builders"] 
= tpl.Builders + return input, nil +} + +func (FixerAmazonPrivateIP) Synopsis() string { + return "Replaces `\"ssh_private_ip\": true` in amazon builders with `\"ssh_interface\": \"private_ip\"`" +} diff --git a/fix/fixer_amazon_private_ip_test.go b/fix/fixer_amazon_private_ip_test.go new file mode 100644 index 000000000..554584ded --- /dev/null +++ b/fix/fixer_amazon_private_ip_test.go @@ -0,0 +1,64 @@ +package fix + +import ( + "reflect" + "testing" +) + +func TestFixerAmazonPrivateIP_Impl(t *testing.T) { + var _ Fixer = new(FixerAmazonPrivateIP) +} + +func TestFixerAmazonPrivateIP(t *testing.T) { + cases := []struct { + Input map[string]interface{} + Expected map[string]interface{} + }{ + // Attach field == false + { + Input: map[string]interface{}{ + "type": "amazon-ebs", + "ssh_private_ip": false, + }, + + Expected: map[string]interface{}{ + "type": "amazon-ebs", + "ssh_interface": "public_ip", + }, + }, + + // Attach field == true + { + Input: map[string]interface{}{ + "type": "amazon-ebs", + "ssh_private_ip": true, + }, + + Expected: map[string]interface{}{ + "type": "amazon-ebs", + "ssh_interface": "private_ip", + }, + }, + } + + for _, tc := range cases { + var f FixerAmazonPrivateIP + + input := map[string]interface{}{ + "builders": []map[string]interface{}{tc.Input}, + } + + expected := map[string]interface{}{ + "builders": []map[string]interface{}{tc.Expected}, + } + + output, err := f.Fix(input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(output, expected) { + t.Fatalf("unexpected: %#v\nexpected: %#v\n", output, expected) + } + } +} diff --git a/fix/fixer_amazon_shutdown_behavior.go b/fix/fixer_amazon_shutdown_behavior.go index 2e47c5592..686ef5119 100644 --- a/fix/fixer_amazon_shutdown_behavior.go +++ b/fix/fixer_amazon_shutdown_behavior.go @@ -1,6 +1,8 @@ package fix import ( + "strings" + "github.com/mitchellh/mapstructure" ) @@ -31,7 +33,7 @@ func (FixerAmazonShutdownBehavior) Fix(input map[string]interface{}) 
(map[string continue } - if builderType != "amazon-ebs" && builderType != "amazon-ebsvolume" && builderType != "amazon-instance" && builderType != "amazon-chroot" { + if !strings.HasPrefix(builderType, "amazon-") { continue } From 87f8a2e8fe47f41d20b1c1b155772712737fee0e Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 10:08:59 -0800 Subject: [PATCH 239/251] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ada8d8e5e..4b4fc6b10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ * 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810] * provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426] * builder/openstack: Extension support has been removed. To use OpenStack builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you use Packer v1.1.2 or earlier version. +* builder/amazon: The `ssh_private_ip` option has been removed. Instead, please use `"ssh_interface": "private"`. A fixer has been written for this, which can be invoked with `packer fix`. 
[GH-5876] ### IMPROVEMENTS: From b863b7ab9ea3c6b13fd2f8031bf81deeddcbb13d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 14:28:49 -0800 Subject: [PATCH 240/251] fix outgoing links --- .../assets/images/Adobe_PDF_file_icon_24x24.png | Bin 1288 -> 0 bytes website/source/docs/builders/vmware-iso.html.md | 8 ++++++-- 2 files changed, 6 insertions(+), 2 deletions(-) delete mode 100644 website/source/assets/images/Adobe_PDF_file_icon_24x24.png diff --git a/website/source/assets/images/Adobe_PDF_file_icon_24x24.png b/website/source/assets/images/Adobe_PDF_file_icon_24x24.png deleted file mode 100644 index a722e5b334140fcb2a527b44465dbf4ca6e05cd4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1288 zcmV+j1^4=iP)5)P}~C2Cc=S;&*9j zNffFDZ6Bl*O8Znwp|lTus1LpfT3TAc4+vN&eGoJ+l82T;tCmzu;tY<)iN>)rCNpzy z?#DT2@70HM?|8@Qh%VUd^Rf3@>%Z3jzt53NSL0Cy0PoSI+=?H3(~W=j@O6MQGXp?C zL{L#w6$1oA8%?3s403tvALrISi1YLFGTpPVuz)aQ=Qn#0BNnYLiTeQ2x`hY=>Rc7a zKvfX|)Zy|GHXN{i@#61#TU)<2v-Nhnt&K(_0?4HW`J1SMfR%=AMl0rA&3)<6VfsCh zMibOy{Y`KVZM7>n-Xj;+2;ch=@qwSj!8_+t@yT=R>+APzY;0WacDqhQf{*HMbged& z1v=lx-}V*U(i!CKW&Dw^QJ#Jk20i?BUndTSl&}09VuUP(^FxXoR=^3(dc8;*&2Opd z)7@_O`~7~um!_#`QI=u2xfurU_CmSvg`hwEZHV7^FqGHdg#7$bikt7Hxc(O8hreKy zwHTT(ECa(lFcK&;K@7&@JP9F~nH^nQTYEOo^Uw7AebZpLD7dJ&n2#Lep)1dyb5WJS zA09(H9jIpz0#Q6Mltdt6L<5TvRVAxs#2BT~Xp9dWIB<8CWe;_`U9>$KEU|& zOKd*=1Z5GZ9s4dN5M0W5=?wi7f29;hVNgWK4FaG7S(=fiDQTK&qtRe67|a3q(x{kI zgVEAyij56o3x=S&MqVKNU^$_^ndv)+!X?%*xTu&WlcM-)A2^{_G1bt0h*JhzO#B#E3SUkfi9Kj|ep9uf+9wNRm`KnK<4K z@UJn;6TnqC;qPQ zP(Jxb;;Dbb$_n=Wd&s2jyI8cR(+Nzoh-7svA5lK@1mWyyl4JKHJ|lnfPsqOggg2IH zJ^p*7-9bXwfx_>FKvhu@%%{Y~GlUmUP@Z^>+HW4geeUz zE{QS5k884c@4a(QcQDQE*|OOrZQ{%A3b44iNC<%o7cPuXo;>-N@p#;sCi+P&#u)4M ydTVuc^*_7*DrT$(w1Fm675Ee;KnZLD{r>^{G#E%4x}j_U0000 Virtual Disk Manager User's Guide for desktop VMware clients. + + Virtual Disk Manager User's Guide for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. 
- `cdrom_adapter_type` (string) - The adapter type (or bus) that will be used @@ -216,7 +218,9 @@ builder. - `network_adapter_type` (string) - This is the ethernet adapter type the the virtual machine will be created with. By default the "e1000" network adapter type will be used by Packer. For more information, please consult the - Choosing a network adapter for your virtual machine for desktop VMware + + Choosing a network adapter for your virtual machine for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. - `output_directory` (string) - This is the path to the directory where the From 593577c139bb57a468c414cd5c7b143469cf8a78 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 15:01:44 -0800 Subject: [PATCH 241/251] populate missing lines from changelog --- CHANGELOG.md | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b4fc6b10..00abba155 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ### BACKWARDS INCOMPATIBILITIES: * core: Affects Windows guests: User variables containing Powershell special characters no longer need to be escaped.[GH-5376] -* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872] * 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810] * provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. 
There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426] * builder/openstack: Extension support has been removed. To use OpenStack builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you use Packer v1.1.2 or earlier version. @@ -33,17 +33,39 @@ * builder/amazon: Add Paris region (eu-west-3) [GH-5718] * builder/azure: Add validation for incorrect VHD URLs [GH-5695] * builder/amazon: Remove Session Token (STS) from being shown in the log. [GH-5665] +* post-processor/vsphere-template: Now accepts artifacts from the vSphere post-processor. [GH-5380] +* builder/vmware-iso: Add support for usb/serial/parallel ports. [GH-3417] +* builder/vmware-iso: Add support for virtual soundcards. [GH-3417] +* builder/vmware-iso: Add support for setting network type and network adapter type. [GH-3417] +* builder/vmware-iso: Add support for cdrom and disk adapter types. [GH-3417] +* builder/vmware-iso: More reliably retrieve the guest networking configuration. [GH-3417] +* builder/amazon: Timeout early if metadata service can't be reached. [GH-5764] +* builder/amazon: Report which authentication provider we're using. [GH-5764] +* builder/amazon: Give better error messages if we have trouble during authentication. [GH-5764] +* builder/amazon: Add `skip_metadata_api_check` option to skip consulting the amazon metadata service. [GH-5764] +* core: Improved support for downloading and validating a uri containing a Windows UNC path or a relative file:// scheme. [GH-2906] +* builder/google: Support specifying licenses for images. [GH-5842] +* post-processor/google-export: Synchronize credential semantics with the Google builder. [GH-4148] +* builder/vmware: Add support for "super" key in `boot_command`. [GH-5681] +* core: Gracefully clean up resources on SIGTERM. [GH-5318] +* builder/hyper-v: Allow MAC address specification. [GH-5709] +* communicator/ssh: Detect dead connections. 
[GH-4709] ### BUG FIXES: +* provisioner/ansible-remote: Fixes an error where Packer's private key can be overridden by inherited `ansible_ssh_private_key` options. [GH-5869] * builder/alicloud-ecs: Attach keypair before starting instance in alicloud builder [GH-5739] * builder/vmware: Fixed file handle leak that may have caused race conditions in vmware builder [GH-5767] -* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872] * provisioner/ansible: The "default extra variables" feature added in Packer v1.0.1 caused the ansible-local provisioner to fail when an --extra-vars argument was specified in the extra_arguments configuration option; this has been fixed. [GH-5335] * communicator/ssh: Add deadline to SSH connection to prevent Packer hangs after script provisioner reboots vm [GH-4684] * builder/virtualbox: Fix regression affecting users running Packer on a Windows host that kept Packer from finding Virtualbox guest additions if Packer ran on a different drive from the one where the guest additions were stored. [GH-5761] * builder/virtualbox: Fix interpolation ordering so that edge cases around guest_additions_url are handled correctly [GH-5757] * builder/amazon: NewSession now inherits MaxRetries and other settings. [GH-5719] +* builder/amazon: Fix tagging support when building in us-gov/china. [GH-5841] +* builder/vmware: Fix case where artifacts might not be cleaned up correctly. [GH-5835] +* provisioner/ansible-local: Fix support for `--extra-vars` in `extra_arguments`. [GH-5703] +* communicator/winrm: Fix issue copying empty directories. 
[GH-5763] ## 1.1.3 (December 8, 2017) From 034e722650794a98efd4dec48050fe4e175289c8 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 15:02:32 -0800 Subject: [PATCH 242/251] format changelog --- CHANGELOG.md | 154 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 104 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00abba155..c9e38a60b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,71 +1,125 @@ ## (UNRELEASED) ### BACKWARDS INCOMPATIBILITIES: -* core: Affects Windows guests: User variables containing Powershell special characters no longer need to be escaped.[GH-5376] -* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872] -* 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810] -* provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426] -* builder/openstack: Extension support has been removed. To use OpenStack builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you use Packer v1.1.2 or earlier version. -* builder/amazon: The `ssh_private_ip` option has been removed. Instead, please use `"ssh_interface": "private"`. A fixer has been written for this, which can be invoked with `packer fix`. 
[GH-5876] +* 3rd party plugins: We have moved internal dependencies, meaning your 3rd + party plugins will no longer compile (however existing builds will still + work fine); the work to fix them is minimal and documented in GH-5810. + [GH-5810] +* builder/amazon: The `ssh_private_ip` option has been removed. Instead, please + use `"ssh_interface": "private"`. A fixer has been written for this, which + can be invoked with `packer fix`. [GH-5876] +* builder/openstack: Extension support has been removed. To use OpenStack + builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you + use Packer v1.1.2 or earlier version. +* core: Affects Windows guests: User variables containing Powershell special + characters no longer need to be escaped.[GH-5376] +* provisioner/file: We've made destination semantics more consistent across the + various communicators. In general, if the destination is a directory, files + will be uploaded into the directory instead of failing. This mirrors the + behavior of `rsync`. There's a chance some users might be depending on the + previous buggy behavior, so it's worth ensuring your configuration is + correct. [GH-5426] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of + environment variables in the non-elevated provisioner has been fixed. + [GH-5515] [GH-5872] ### IMPROVEMENTS: -* **New builder:** `scaleway` - The Scaleway Packer builder is able to create new images for use with Scaleway BareMetal and Virtual cloud server. [GH-4770] -* **New builder:** `ncloud` for building server images using the NAVER Cloud Platform. [GH-5791] -* **New builder:** `oci-classic` for building new custom images for use with Oracle Cloud Infrastructure Classic Compute. [GH-5819] -* builder/docker: Remove credentials from being shown in the log. [GH-5666] -* builder/triton: Triton RBAC is now supported. [GH-5741] -* provisioner/ansible: Improve user retrieval. 
[GH-5758] -* post-processor/docker: Remove credentials from being shown in the log. [GH-5666] -* builder/amazon: Warn during prepare if we didn't get both an access key and a secret key when we were expecting one. [GH-5762] -* builder/amazon: Replace `InstanceStatusOK` check with `InstanceReady`. This reduces build times universally while still working for all instance types. [GH-5678] +* **New builder:** `ncloud` for building server images using the NAVER Cloud + Platform. [GH-5791] +* **New builder:** `oci-classic` for building new custom images for use with + Oracle Cloud Infrastructure Classic Compute. [GH-5819] +* **New builder:** `scaleway` - The Scaleway Packer builder is able to create + new images for use with Scaleway BareMetal and Virtual cloud server. + [GH-4770] * builder/amazon: Add `kms_key_id` option to block device mappings. [GH-5774] -* builder/hyper-v: New option to use differential disks and Inline disk creation to improve build time and reduce disk usage [GH-5631] -* post-processor/vagrant: Add vagrant post-processor support for Google [GH-5732] -* provisioner/chef: Added Policyfile support to chef-client provisioner. [GH-5831] -* builder/qemu: Add Intel HAXM support to QEMU builder [GH-5738] -* communicator/ssh: Add session-level keep-alives [GH-5830] -* post-processor/amazon-import: Allow user to specify role name in amazon-import [GH-5817] -* provisioner/chef: Add support for 'trusted_certs_dir' chef-client configuration option [GH-5790] -* builder/triton: Updated triton-go dependencies, allowing better error handling. [GH-5795] -* core: Improved error logging in floppy file handling. [GH-5802] -* provisioner/amazon: Use Amazon SDK's InstanceRunning waiter instead of InstanceStatusOK waiter [GH-5773] +* builder/amazon: Add `skip_metadata_api_check` option to skip consulting the + amazon metadata service. 
[GH-5764] * builder/amazon: Add Paris region (eu-west-3) [GH-5718] +* builder/amazon: Give better error messages if we have trouble during + authentication. [GH-5764] +* builder/amazon: Remove Session Token (STS) from being shown in the log. + [GH-5665] +* builder/amazon: Replace `InstanceStatusOK` check with `InstanceReady`. This + reduces build times universally while still working for all instance types. + [GH-5678] +* builder/amazon: Report which authentication provider we're using. [GH-5764] +* builder/amazon: Timeout early if metadata service can't be reached. [GH-5764] +* builder/amazon: Warn during prepare if we didn't get both an access key and a + secret key when we were expecting one. [GH-5762] * builder/azure: Add validation for incorrect VHD URLs [GH-5695] -* builder/amazon: Remove Session Token (STS) from being shown in the log. [GH-5665] -* post-processor/vsphere-template: Now accepts artifacts from the vSphere post-processor. [GH-5380] +* builder/docker: Remove credentials from being shown in the log. [GH-5666] +* builder/google: Support specifying licenses for images. [GH-5842] +* builder/hyper-v: Allow MAC address specification. [GH-5709] +* builder/hyper-v: New option to use differential disks and Inline disk + creation to improve build time and reduce disk usage [GH-5631] +* builder/qemu: Add Intel HAXM support to QEMU builder [GH-5738] +* builder/triton: Triton RBAC is now supported. [GH-5741] +* builder/triton: Updated triton-go dependencies, allowing better error + handling. [GH-5795] +* builder/vmware-iso: Add support for cdrom and disk adapter types. [GH-3417] +* builder/vmware-iso: Add support for setting network type and network adapter + type. [GH-3417] * builder/vmware-iso: Add support for usb/serial/parallel ports. [GH-3417] * builder/vmware-iso: Add support for virtual soundcards. [GH-3417] -* builder/vmware-iso: Add support for setting network type and network adapter type. 
[GH-3417] -* builder/vmware-iso: Add support for cdrom and disk adapter types. [GH-3417] -* builder/vmware-iso: More reliably retrieve the guest networking configuration. [GH-3417] -* builder/amazon: Timeout early if metadata service can't be reached. [GH-5764] -* builder/amazon: Report which authentication provider we're using. [GH-5764] -* builder/amazon: Give better error messages if we have trouble during authentication. [GH-5764] -* builder/amazon: Add `skip_metadata_api_check` option to skip consulting the amazon metadata service. [GH-5764] -* core: Improved support for downloading and validating a uri containing a Windows UNC path or a relative file:// scheme. [GH-2906] -* builder/google: Support specifying licenses for images. [GH-5842] -* post-processor/google-export: Synchronize credential semantics with the Google builder. [GH-4148] +* builder/vmware-iso: More reliably retrieve the guest networking + configuration. [GH-3417] * builder/vmware: Add support for "super" key in `boot_command`. [GH-5681] -* core: Gracefully clean up resources on SIGTERM. [GH-5318] -* builder/hyper-v: Allow MAC address specification. [GH-5709] +* communicator/ssh: Add session-level keep-alives [GH-5830] * communicator/ssh: Detect dead connections. [GH-4709] +* core: Gracefully clean up resources on SIGTERM. [GH-5318] +* core: Improved error logging in floppy file handling. [GH-5802] +* core: Improved support for downloading and validating a uri containing a + Windows UNC path or a relative file:// scheme. [GH-2906] +* post-processor/amazon-import: Allow user to specify role name in amazon- + import [GH-5817] +* post-processor/docker: Remove credentials from being shown in the log. + [GH-5666] +* post-processor/google-export: Synchronize credential semantics with the + Google builder. [GH-4148] +* post-processor/vagrant: Add vagrant post-processor support for Google + [GH-5732] +* post-processor/vsphere-template: Now accepts artifacts from the vSphere post- + processor. 
[GH-5380] +* provisioner/amazon: Use Amazon SDK's InstanceRunning waiter instead of + InstanceStatusOK waiter [GH-5773] +* provisioner/ansible: Improve user retrieval. [GH-5758] +* provisioner/chef: Add support for 'trusted_certs_dir' chef-client + configuration option [GH-5790] +* provisioner/chef: Added Policyfile support to chef-client provisioner. + [GH-5831] ### BUG FIXES: -* provisioner/ansible-remote: Fixes an error where Packer's private key can be overridden by inherited `ansible_ssh_private_key` options. [GH-5869] -* builder/alicloud-ecs: Attach keypair before starting instance in alicloud builder [GH-5739] -* builder/vmware: Fixed file handle leak that may have caused race conditions in vmware builder [GH-5767] -* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872] -* provisioner/ansible: The "default extra variables" feature added in Packer v1.0.1 caused the ansible-local provisioner to fail when an --extra-vars argument was specified in the extra_arguments configuration option; this has been fixed. [GH-5335] -* communicator/ssh: Add deadline to SSH connection to prevent Packer hangs after script provisioner reboots vm [GH-4684] -* builder/virtualbox: Fix regression affecting users running Packer on a Windows host that kept Packer from finding Virtualbox guest additions if Packer ran on a different drive from the one where the guest additions were stored. [GH-5761] -* builder/virtualbox: Fix interpolation ordering so that edge cases around guest_additions_url are handled correctly [GH-5757] -* builder/amazon: NewSession now inherits MaxRetries and other settings. [GH-5719] +* builder/alicloud-ecs: Attach keypair before starting instance in alicloud + builder [GH-5739] * builder/amazon: Fix tagging support when building in us-gov/china. [GH-5841] -* builder/vmware: Fix case where artifacts might not be cleaned up correctly. 
[GH-5835] -* provisioner/ansible-local: Fix support for `--extra-vars` in `extra_arguments`. [GH-5703] +* builder/amazon: NewSession now inherits MaxRetries and other settings. + [GH-5719] +* builder/virtualbox: Fix interpolation ordering so that edge cases around + guest_additions_url are handled correctly [GH-5757] +* builder/virtualbox: Fix regression affecting users running Packer on a + Windows host that kept Packer from finding Virtualbox guest additions if + Packer ran on a different drive from the one where the guest additions were + stored. [GH-5761] +* builder/vmware: Fix case where artifacts might not be cleaned up correctly. + [GH-5835] +* builder/vmware: Fixed file handle leak that may have caused race conditions + in vmware builder [GH-5767] +* communicator/ssh: Add deadline to SSH connection to prevent Packer hangs + after script provisioner reboots vm [GH-4684] * communicator/winrm: Fix issue copying empty directories. [GH-5763] +* provisioner/ansible-local: Fix support for `--extra-vars` in + `extra_arguments`. [GH-5703] +* provisioner/ansible-remote: Fixes an error where Packer's private key can be + overridden by inherited `ansible_ssh_private_key` options. [GH-5869] +* provisioner/ansible: The "default extra variables" feature added in Packer + v1.0.1 caused the ansible-local provisioner to fail when an --extra-vars + argument was specified in the extra_arguments configuration option; this + has been fixed. [GH-5335] +* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of + environment variables in the non-elevated provisioner has been fixed. 
+ [GH-5515] [GH-5872] ## 1.1.3 (December 8, 2017) From 0093f48614379d11248f2b971fa7cb86de59bd0a Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 15:07:10 -0800 Subject: [PATCH 243/251] prep for 1.2.0 --- CHANGELOG.md | 2 +- version/version.go | 4 ++-- website/config.rb | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9e38a60b..622d01f9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## (UNRELEASED) +## 1.2.0 (February 9, 2018) ### BACKWARDS INCOMPATIBILITIES: * 3rd party plugins: We have moved internal dependencies, meaning your 3rd diff --git a/version/version.go b/version/version.go index 03ee6f644..5538683af 100644 --- a/version/version.go +++ b/version/version.go @@ -9,12 +9,12 @@ import ( var GitCommit string // The main version number that is being run at the moment. -const Version = "1.1.4" +const Version = "1.2.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "dev" +const VersionPrerelease = "" func FormattedVersion() string { var versionString bytes.Buffer diff --git a/website/config.rb b/website/config.rb index fc5caf4e5..e6d530fdc 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| h.name = "packer" - h.version = "1.1.3" + h.version = "1.2.0" h.github_slug = "hashicorp/packer" h.website_root = "website" end From ca393b96f3909c3cde0f63d5aea211573dd22bc1 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 15:14:55 -0800 Subject: [PATCH 244/251] Cut version 1.2.0 From f2a94a97b7714ae210230435e81d6f137585a183 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 16:11:32 -0800 Subject: [PATCH 245/251] next version is 1.2.1 --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index 5538683af..93fffef3e 100644 --- a/version/version.go +++ b/version/version.go @@ -9,12 +9,12 @@ import ( var GitCommit string // The main version number that is being run at the moment. -const Version = "1.2.0" +const Version = "1.2.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "" +const VersionPrerelease = "dev" func FormattedVersion() string { var versionString bytes.Buffer From 9e636f01ed51bda8e1325885d938587d078a9468 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 9 Feb 2018 16:18:08 -0800 Subject: [PATCH 246/251] prep changelog for next release --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 622d01f9f..d8878587c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,6 @@ +## UNRELEASED + + ## 1.2.0 (February 9, 2018) ### BACKWARDS INCOMPATIBILITIES: From 21313f4cf3740e8d1f7f7fe8efb8c2f0f4d23b07 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Sat, 10 Feb 2018 13:59:28 -0800 Subject: [PATCH 247/251] fix link --- website/source/docs/builders/oracle.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/oracle.html.md b/website/source/docs/builders/oracle.html.md index 70a1917d0..7184b0a59 100644 --- a/website/source/docs/builders/oracle.html.md +++ b/website/source/docs/builders/oracle.html.md @@ -17,6 +17,6 @@ designed to support both platforms. Please choose the one that's right for you: instance and creating an image list from a snapshot of it after provisioning. -- [oracle-oci](/docs/builders/amazon-instance.html) - Create custom images in +- [oracle-oci](/docs/builders/oracle-oci.html) - Create custom images in Oracle Cloud Infrastructure (OCI) by launching a base instance and creating an image from it after provisioning. 
From 2747562708efe81983f70445c058f9ee7413b82d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Sun, 11 Feb 2018 13:57:25 -0800 Subject: [PATCH 248/251] remove ssh_private_ip from docs --- website/source/docs/builders/amazon-ebs.html.md | 4 ++-- website/source/docs/builders/amazon-ebssurrogate.html.md | 4 ++-- website/source/docs/builders/amazon-ebsvolume.html.md | 4 ++-- website/source/docs/builders/amazon-instance.html.md | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.md b/website/source/docs/builders/amazon-ebs.html.md index 892029d26..4f2a44498 100644 --- a/website/source/docs/builders/amazon-ebs.html.md +++ b/website/source/docs/builders/amazon-ebs.html.md @@ -337,8 +337,8 @@ builder. in AWS with the source instance, set the `ssh_keypair_name` field to the name of the key pair. -- `ssh_private_ip` (boolean) - *Deprecated* use `ssh_interface` instead. If `true`, - then SSH will always use the private IP if available. Also works for WinRM. +- `ssh_private_ip` (boolean) - No longer supported. See + [`ssh_interface`](#ssh_interface). A fixer exists to migrate. - `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns` or `private_dns`. If set, either the public IP address, diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md b/website/source/docs/builders/amazon-ebssurrogate.html.md index 42aaa8664..c01c3c493 100644 --- a/website/source/docs/builders/amazon-ebssurrogate.html.md +++ b/website/source/docs/builders/amazon-ebssurrogate.html.md @@ -330,8 +330,8 @@ builder. in AWS with the source instance, set the `ssh_keypair_name` field to the name of the key pair. -- `ssh_private_ip` (boolean) - *Deprecated* use `ssh_interface` instead. If `true`, - then SSH will always use the private IP if available. Also works for WinRM. +- `ssh_private_ip` (boolean) - No longer supported. See + [`ssh_interface`](#ssh_interface). A fixer exists to migrate. 
- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns` or `private_dns`. If set, either the public IP address, diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md b/website/source/docs/builders/amazon-ebsvolume.html.md index 5580897f2..186295b87 100644 --- a/website/source/docs/builders/amazon-ebsvolume.html.md +++ b/website/source/docs/builders/amazon-ebsvolume.html.md @@ -224,8 +224,8 @@ builder. [`ssh_private_key_file`](/docs/templates/communicator.html#ssh_private_key_file) must be specified with this. -- `ssh_private_ip` (boolean) - *Deprecated* use `ssh_interface` instead. If `true`, - then SSH will always use the private IP if available. Also works for WinRM. +- `ssh_private_ip` (boolean) - No longer supported. See + [`ssh_interface`](#ssh_interface). A fixer exists to migrate. - `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns` or `private_dns`. If set, either the public IP address, diff --git a/website/source/docs/builders/amazon-instance.html.md b/website/source/docs/builders/amazon-instance.html.md index 624c5d6e0..924e174dd 100644 --- a/website/source/docs/builders/amazon-instance.html.md +++ b/website/source/docs/builders/amazon-instance.html.md @@ -338,8 +338,8 @@ builder. in AWS with the source instance, set the `ssh_keypair_name` field to the name of the key pair. -- `ssh_private_ip` (boolean) - *Deprecated* use `ssh_interface` instead. If `true`, - then SSH will always use the private IP if available. Also works for WinRM. +- `ssh_private_ip` (boolean) - No longer supported. See + [`ssh_interface`](#ssh_interface). A fixer exists to migrate. - `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns` or `private_dns`. 
If set, either the public IP address, From a4123eabe35e3d29e3aa6d50efa107800aa87cb6 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 12 Feb 2018 10:45:53 -0800 Subject: [PATCH 249/251] prevent 0-value ticker during ssh keepalive --- communicator/ssh/communicator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 5c2cfac9d..b5b67633c 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -112,7 +112,7 @@ func (c *comm) Start(cmd *packer.RemoteCmd) (err error) { } go func() { - if c.config.KeepAliveInterval < 0 { + if c.config.KeepAliveInterval <= 0 { return } c := time.NewTicker(c.config.KeepAliveInterval) From 7d9a86becb0d4790ce4cd10ecd6eb7c3f059db89 Mon Sep 17 00:00:00 2001 From: SwampDragons Date: Mon, 12 Feb 2018 11:03:19 -0800 Subject: [PATCH 250/251] Revert "Fix #5335" --- provisioner/ansible-local/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index e007bfb85..ec1eec041 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -304,7 +304,7 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err playbook := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) inventory := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.InventoryFile))) - extraArgs := fmt.Sprintf(" --extra-vars \\\"packer_build_name=%s packer_builder_type=%s packer_http_addr=%s\\\" ", + extraArgs := fmt.Sprintf(" --extra-vars \"packer_build_name=%s packer_builder_type=%s packer_http_addr=%s\" ", p.config.PackerBuildName, p.config.PackerBuilderType, common.GetHTTPAddr()) if len(p.config.ExtraArguments) > 0 { extraArgs = extraArgs + strings.Join(p.config.ExtraArguments, " ") From 
f3ac1af5dfbb37cb6de1e247be13cc45d4c65f6c Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 12 Feb 2018 11:18:50 -0800 Subject: [PATCH 251/251] add scaleway codeowners --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 050f4d084..216cce578 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -14,6 +14,7 @@ /builder/profitbricks/ @jasmingacic /builder/triton/ @jen20 @sean- /builder/ncloud/ @YuSungDuk +/builder/scaleway/ @dimtion @edouardb # provisioners