From 4fa0f6baa308b92ca1a0486d69a310dfa984affe Mon Sep 17 00:00:00 2001 From: "Dax T. Games" Date: Tue, 4 Feb 2020 14:25:45 -0500 Subject: [PATCH 01/61] recreate https://github.com/jetbrains-infra/packer-builder-vsphere/pull/238 --- builder/vsphere/driver/vm_cdrom.go | 17 +++++++++++++++++ builder/vsphere/iso/builder.go | 4 +++- builder/vsphere/iso/config.go | 1 + builder/vsphere/iso/step_remove_cdrom.go | 19 +++++++++++++++++-- 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/builder/vsphere/driver/vm_cdrom.go b/builder/vsphere/driver/vm_cdrom.go index c5d7e29ed..d33b75829 100644 --- a/builder/vsphere/driver/vm_cdrom.go +++ b/builder/vsphere/driver/vm_cdrom.go @@ -51,6 +51,23 @@ func (vm *VirtualMachine) CreateCdrom(c *types.VirtualController) (*types.Virtua return device, nil } +func (vm *VirtualMachine) RemoveCdroms() error { + devices, err := vm.Devices() + if err != nil { + return err + } + cdroms := devices.SelectByType((*types.VirtualCdrom)(nil)) + if err = vm.RemoveDevice(true, cdroms...); err != nil { + return err + } + + sata := devices.SelectByType((*types.VirtualAHCIController)(nil)) + if err = vm.RemoveDevice(true, sata...); err != nil { + return err + } + return nil +} + func (vm *VirtualMachine) EjectCdroms() error { devices, err := vm.Devices() if err != nil { diff --git a/builder/vsphere/iso/builder.go b/builder/vsphere/iso/builder.go index 1f6a2bd20..9702cc96a 100644 --- a/builder/vsphere/iso/builder.go +++ b/builder/vsphere/iso/builder.go @@ -121,7 +121,9 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } steps = append(steps, - &StepRemoveCDRom{}, + &StepRemoveCDRom{ + Config: &b.config.RemoveCDRomConfig, + }, &common.StepCreateSnapshot{ CreateSnapshot: b.config.CreateSnapshot, }, diff --git a/builder/vsphere/iso/config.go b/builder/vsphere/iso/config.go index e3c297fa4..991827c94 100644 --- a/builder/vsphere/iso/config.go +++ b/builder/vsphere/iso/config.go @@ -24,6 +24,7 @@ type Config struct { packerCommon.ISOConfig `mapstructure:",squash"` CDRomConfig `mapstructure:",squash"` + RemoveCDRomConfig `mapstructure:",squash"` FloppyConfig `mapstructure:",squash"` common.RunConfig `mapstructure:",squash"` BootConfig `mapstructure:",squash"` diff --git a/builder/vsphere/iso/step_remove_cdrom.go b/builder/vsphere/iso/step_remove_cdrom.go index bd2e560cd..afc09344e 100644 --- a/builder/vsphere/iso/step_remove_cdrom.go +++ b/builder/vsphere/iso/step_remove_cdrom.go @@ -7,7 +7,13 @@ import ( "github.com/hashicorp/packer/packer" ) -type StepRemoveCDRom struct{} +type RemoveCDRomConfig struct { + RemoveCdrom bool `mapstructure:"remove_cdrom"` +} + +type StepRemoveCDRom struct { + Config *RemoveCDRomConfig +} func (s *StepRemoveCDRom) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) @@ -20,7 +26,16 @@ func (s *StepRemoveCDRom) Run(_ context.Context, state multistep.StateBag) multi return multistep.ActionHalt } - return multistep.ActionContinue + if s.Config.RemoveCdrom == true { + ui.Say("Deleting CD-ROM drives...") + err := vm.RemoveCdroms() + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + } + + return multistep.ActionContinue } func (s *StepRemoveCDRom) Cleanup(state multistep.StateBag) {} From 9756a9858a82dfd9a5b5f306e8a48920e594752b Mon Sep 17 00:00:00 2001 From: Dax T Games Date: Tue, 4 Feb 2020 14:41:44 -0500 Subject: [PATCH 02/61] Update builder.go --- builder/vsphere/iso/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/builder/vsphere/iso/builder.go b/builder/vsphere/iso/builder.go index 9702cc96a..49b96cae1 100644 --- a/builder/vsphere/iso/builder.go +++ b/builder/vsphere/iso/builder.go @@ -121,7 +121,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } steps = append(steps, - &StepRemoveCDRom{ + &StepRemoveCDRom{ Config: &b.config.RemoveCDRomConfig, }, &common.StepCreateSnapshot{ From 6b82bf5a93c4a04a7a2b5499d921f994f4770111 Mon Sep 17 00:00:00 2001 From: "Dax T. Games" Date: Wed, 5 Feb 2020 15:25:49 +0000 Subject: [PATCH 03/61] format --- builder/vsphere/iso/builder.go | 2 +- builder/vsphere/iso/config.go | 2 +- builder/vsphere/iso/step_remove_cdrom.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/vsphere/iso/builder.go b/builder/vsphere/iso/builder.go index 49b96cae1..d35a9e3e3 100644 --- a/builder/vsphere/iso/builder.go +++ b/builder/vsphere/iso/builder.go @@ -121,7 +121,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } steps = append(steps, - &StepRemoveCDRom{ + &StepRemoveCDRom{ Config: &b.config.RemoveCDRomConfig, }, &common.StepCreateSnapshot{ diff --git a/builder/vsphere/iso/config.go b/builder/vsphere/iso/config.go index 991827c94..63a4eb476 100644 --- a/builder/vsphere/iso/config.go +++ b/builder/vsphere/iso/config.go @@ -24,7 +24,7 @@ type Config struct { packerCommon.ISOConfig `mapstructure:",squash"` CDRomConfig `mapstructure:",squash"` - RemoveCDRomConfig `mapstructure:",squash"` + RemoveCDRomConfig `mapstructure:",squash"` FloppyConfig `mapstructure:",squash"` common.RunConfig `mapstructure:",squash"` BootConfig `mapstructure:",squash"` diff --git a/builder/vsphere/iso/step_remove_cdrom.go b/builder/vsphere/iso/step_remove_cdrom.go index afc09344e..9fd3075a4 100644 --- a/builder/vsphere/iso/step_remove_cdrom.go +++ b/builder/vsphere/iso/step_remove_cdrom.go @@ -26,7 +26,7 @@ func (s *StepRemoveCDRom) Run(_ context.Context, state multistep.StateBag) multi return multistep.ActionHalt } - if s.Config.RemoveCdrom == true { + if s.Config.RemoveCdrom == true { ui.Say("Deleting CD-ROM drives...") err := vm.RemoveCdroms() if err != nil { @@ -35,7 +35,7 @@ func (s *StepRemoveCDRom) Run(_ context.Context, state multistep.StateBag) multi } } - return multistep.ActionContinue + return multistep.ActionContinue } func (s *StepRemoveCDRom) Cleanup(state multistep.StateBag) {} From 76680ac1c617c93863321234aa34bb0229262941 Mon Sep 17 00:00:00 2001 From: "Dax T. 
Games" Date: Wed, 5 Feb 2020 11:10:50 -0500 Subject: [PATCH 04/61] add docs --- builder/vsphere/iso/config.hcl2spec.go | 2 ++ .../builder/vsphere/iso/_CDRomConfig-not-required.html.md | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/builder/vsphere/iso/config.hcl2spec.go b/builder/vsphere/iso/config.hcl2spec.go index 7ea8949ca..222791485 100644 --- a/builder/vsphere/iso/config.hcl2spec.go +++ b/builder/vsphere/iso/config.hcl2spec.go @@ -61,6 +61,7 @@ type FlatConfig struct { TargetExtension *string `mapstructure:"iso_target_extension" cty:"iso_target_extension"` CdromType *string `mapstructure:"cdrom_type" cty:"cdrom_type"` ISOPaths []string `mapstructure:"iso_paths" cty:"iso_paths"` + RemoveCdrom *bool `mapstructure:"remove_cdrom" cty:"remove_cdrom"` FloppyIMGPath *string `mapstructure:"floppy_img_path" cty:"floppy_img_path"` FloppyFiles []string `mapstructure:"floppy_files" cty:"floppy_files"` FloppyDirectories []string `mapstructure:"floppy_dirs" cty:"floppy_dirs"` @@ -180,6 +181,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "iso_target_extension": &hcldec.AttrSpec{Name: "iso_target_extension", Type: cty.String, Required: false}, "cdrom_type": &hcldec.AttrSpec{Name: "cdrom_type", Type: cty.String, Required: false}, "iso_paths": &hcldec.AttrSpec{Name: "iso_paths", Type: cty.List(cty.String), Required: false}, + "remove_cdrom": &hcldec.AttrSpec{Name: "remove_cdrom", Type: cty.Bool, Required: false}, "floppy_img_path": &hcldec.AttrSpec{Name: "floppy_img_path", Type: cty.String, Required: false}, "floppy_files": &hcldec.AttrSpec{Name: "floppy_files", Type: cty.List(cty.String), Required: false}, "floppy_dirs": &hcldec.AttrSpec{Name: "floppy_dirs", Type: cty.List(cty.String), Required: false}, diff --git a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md index e3952b792..0e14df491 100644 --- a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md +++ b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md @@ -2,6 +2,8 @@ - `cdrom_type` (string) - Which controller to use. Example: `sata`. Defaults to `ide`. +- `remove_cdrom` (boolean) - Remove CD/DVD-ROM devices from template. Ddefaults to `false`. + - `iso_paths` ([]string) - List of datastore paths to ISO files that will be mounted to the VM. Example: `"[datastore1] ISO/ubuntu.iso"`. - \ No newline at end of file + From 90eee3829da443b4966e82d01ef0162b904e7212 Mon Sep 17 00:00:00 2001 From: "Dax T. Games" Date: Wed, 5 Feb 2020 11:13:00 -0500 Subject: [PATCH 05/61] add docs --- .../builder/vsphere/iso/_CDRomConfig-not-required.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md index 0e14df491..ed50e4461 100644 --- a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md +++ b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md @@ -2,7 +2,7 @@ - `cdrom_type` (string) - Which controller to use. Example: `sata`. Defaults to `ide`. -- `remove_cdrom` (boolean) - Remove CD/DVD-ROM devices from template. Ddefaults to `false`. +- `remove_cdrom` (boolean) - Remove CD/DVD-ROM devices from template. Defaults to `false`. - `iso_paths` ([]string) - List of datastore paths to ISO files that will be mounted to the VM. 
Example: `"[datastore1] ISO/ubuntu.iso"`. From a19214afeb6b294fe70dbeebe1c71192385efb1a Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 17:07:58 +0100 Subject: [PATCH 06/61] Allow to use isos in place --- common/iso_config.go | 2 +- common/step_download.go | 39 +- common/step_download_test.go | 2 +- go.mod | 3 +- go.sum | 2 + packer/ui.go | 2 +- .../hashicorp/go-getter/.travis.yml | 24 -- .../hashicorp/go-getter/client_option.go | 46 --- .../hashicorp/go-getter/get_base.go | 20 - .../hashicorp/go-getter/get_file.go | 36 -- .../hashicorp/go-getter/get_file_unix.go | 103 ----- .../hashicorp/go-getter/v2/.gitignore | 1 + .../github.com/hashicorp/go-getter/v2/LICENSE | 354 ++++++++++++++++++ .../hashicorp/go-getter/{ => v2}/README.md | 8 +- .../hashicorp/go-getter/{ => v2}/appveyor.yml | 0 .../hashicorp/go-getter/{ => v2}/checksum.go | 44 ++- .../hashicorp/go-getter/{ => v2}/client.go | 161 ++++---- .../hashicorp/go-getter/v2/client_option.go | 22 ++ .../{ => v2}/client_option_progress.go | 12 - .../hashicorp/go-getter/{ => v2}/common.go | 0 .../hashicorp/go-getter/{ => v2}/copy_dir.go | 0 .../go-getter/{ => v2}/decompress.go | 0 .../go-getter/{ => v2}/decompress_bzip2.go | 0 .../go-getter/{ => v2}/decompress_gzip.go | 0 .../go-getter/{ => v2}/decompress_tar.go | 0 .../go-getter/{ => v2}/decompress_tbz2.go | 0 .../go-getter/{ => v2}/decompress_testing.go | 0 .../go-getter/{ => v2}/decompress_tgz.go | 0 .../go-getter/{ => v2}/decompress_txz.go | 0 .../go-getter/{ => v2}/decompress_xz.go | 0 .../go-getter/{ => v2}/decompress_zip.go | 4 +- .../hashicorp/go-getter/{ => v2}/detect.go | 0 .../go-getter/{ => v2}/detect_bitbucket.go | 0 .../go-getter/{ => v2}/detect_file.go | 0 .../go-getter/{ => v2}/detect_gcs.go | 0 .../go-getter/{ => v2}/detect_git.go | 0 .../go-getter/{ => v2}/detect_github.go | 0 .../hashicorp/go-getter/{ => v2}/detect_s3.go | 0 .../go-getter/{ => v2}/detect_ssh.go | 0 .../go-getter/{ => v2}/folder_storage.go | 6 +- .../hashicorp/go-getter/{ => v2}/get.go | 57 +-- .../hashicorp/go-getter/v2/get_base.go | 9 + .../{get_file_windows.go => v2/get_file.go} | 95 +++-- .../go-getter/{ => v2}/get_file_copy.go | 0 .../go-getter/v2/get_file_symlink.go | 10 + .../go-getter/v2/get_file_symlink_windows.go | 21 ++ .../hashicorp/go-getter/{ => v2}/get_gcs.go | 31 +- .../hashicorp/go-getter/{ => v2}/get_git.go | 80 ++-- .../hashicorp/go-getter/{ => v2}/get_hg.go | 37 +- .../hashicorp/go-getter/{ => v2}/get_http.go | 94 ++--- .../hashicorp/go-getter/{ => v2}/get_mock.go | 23 +- .../hashicorp/go-getter/{ => v2}/get_s3.go | 38 +- .../hashicorp/go-getter/{ => v2}/go.mod | 5 +- .../hashicorp/go-getter/{ => v2}/go.sum | 0 .../hashicorp/go-getter/v2/helper/url/url.go | 14 + .../go-getter/v2/helper/url/url_unix.go | 11 + .../go-getter/v2/helper/url/url_windows.go | 39 ++ .../go-getter/{client_mode.go => v2/mode.go} | 18 +- .../hashicorp/go-getter/{ => v2}/netrc.go | 0 .../hashicorp/go-getter/v2/request.go | 39 ++ .../hashicorp/go-getter/{ => v2}/source.go | 0 .../hashicorp/go-getter/{ => v2}/storage.go | 4 +- vendor/modules.txt | 4 +- 63 files changed, 906 insertions(+), 614 deletions(-) delete mode 100644 vendor/github.com/hashicorp/go-getter/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-getter/client_option.go delete mode 100644 vendor/github.com/hashicorp/go-getter/get_base.go delete mode 100644 vendor/github.com/hashicorp/go-getter/get_file.go delete mode 100644 vendor/github.com/hashicorp/go-getter/get_file_unix.go create mode 100644 
vendor/github.com/hashicorp/go-getter/v2/.gitignore create mode 100644 vendor/github.com/hashicorp/go-getter/v2/LICENSE rename vendor/github.com/hashicorp/go-getter/{ => v2}/README.md (95%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/appveyor.yml (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/checksum.go (87%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/client.go (64%) create mode 100644 vendor/github.com/hashicorp/go-getter/v2/client_option.go rename vendor/github.com/hashicorp/go-getter/{ => v2}/client_option_progress.go (71%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/common.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/copy_dir.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_bzip2.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_gzip.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_tar.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_tbz2.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_testing.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_tgz.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_txz.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_xz.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/decompress_zip.go (98%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_bitbucket.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_file.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_gcs.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_git.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_github.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_s3.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/detect_ssh.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/folder_storage.go (89%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get.go (81%) create mode 100644 vendor/github.com/hashicorp/go-getter/v2/get_base.go rename vendor/github.com/hashicorp/go-getter/{get_file_windows.go => v2/get_file.go} (54%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_file_copy.go (100%) create mode 100644 vendor/github.com/hashicorp/go-getter/v2/get_file_symlink.go create mode 100644 vendor/github.com/hashicorp/go-getter/v2/get_file_symlink_windows.go rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_gcs.go (82%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_git.go (76%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_hg.go (77%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_http.go (76%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_mock.go (60%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/get_s3.go (86%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/go.mod (90%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/go.sum (100%) create mode 100644 vendor/github.com/hashicorp/go-getter/v2/helper/url/url.go create mode 100644 vendor/github.com/hashicorp/go-getter/v2/helper/url/url_unix.go create mode 100644 vendor/github.com/hashicorp/go-getter/v2/helper/url/url_windows.go rename vendor/github.com/hashicorp/go-getter/{client_mode.go => v2/mode.go} (53%) 
rename vendor/github.com/hashicorp/go-getter/{ => v2}/netrc.go (100%) create mode 100644 vendor/github.com/hashicorp/go-getter/v2/request.go rename vendor/github.com/hashicorp/go-getter/{ => v2}/source.go (100%) rename vendor/github.com/hashicorp/go-getter/{ => v2}/storage.go (85%) diff --git a/common/iso_config.go b/common/iso_config.go index 0edd86bd8..08f355b34 100644 --- a/common/iso_config.go +++ b/common/iso_config.go @@ -11,7 +11,7 @@ import ( "os" "strings" - getter "github.com/hashicorp/go-getter" + getter "github.com/hashicorp/go-getter/v2" "github.com/hashicorp/packer/template/interpolate" ) diff --git a/common/step_download.go b/common/step_download.go index 62d68a186..c15fc566d 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -11,8 +11,8 @@ import ( "runtime" "strings" - getter "github.com/hashicorp/go-getter" - urlhelper "github.com/hashicorp/go-getter/helper/url" + getter "github.com/hashicorp/go-getter/v2" + urlhelper "github.com/hashicorp/go-getter/v2/helper/url" "github.com/hashicorp/packer/common/filelock" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" @@ -52,6 +52,8 @@ type StepDownload struct { Extension string } +var defaultGetterClient = getter.Client{} + func (s *StepDownload) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { if len(s.Url) == 0 { log.Printf("No URLs were provided to Step Download. Continuing...") @@ -96,24 +98,6 @@ func (s *StepDownload) Run(ctx context.Context, state multistep.StateBag) multis return multistep.ActionHalt } -var ( - getters = getter.Getters -) - -func init() { - if runtime.GOOS == "windows" { - getters["file"] = &getter.FileGetter{ - // always copy local files instead of symlinking to fix GH-7534. The - // longer term fix for this would be to change the go-getter so that it - // can leave the source file where it is & tell us where it is. - Copy: true, - } - getters["smb"] = &getter.FileGetter{ - Copy: true, - } - } -} - func (s *StepDownload) download(ctx context.Context, ui packer.Ui, source string) (string, error) { if runtime.GOOS == "windows" { // Check that the user specified a UNC path, and promote it to an smb:// uri. @@ -208,20 +192,21 @@ func (s *StepDownload) download(ctx context.Context, ui packer.Ui, source string } ui.Say(fmt.Sprintf("Trying %s", u.String())) - gc := getter.Client{ - Ctx: ctx, + req := &getter.Request{ Dst: targetPath, Src: src, ProgressListener: ui, Pwd: wd, - Dir: false, - Getters: getters, + Mode: getter.ModeFile, + } + if runtime.GOOS == "windows" { + req.Inplace = true } - switch err := gc.Get(); err.(type) { + switch op, err := defaultGetterClient.Get(ctx, req); err.(type) { case nil: // success ! 
- ui.Say(fmt.Sprintf("%s => %s", u.String(), targetPath)) - return targetPath, nil + ui.Say(fmt.Sprintf("%s => %s", u.String(), op.Dst)) + return op.Dst, nil case *getter.ChecksumError: ui.Say(fmt.Sprintf("Checksum did not match, removing %s", targetPath)) if err := os.Remove(targetPath); err != nil { diff --git a/common/step_download_test.go b/common/step_download_test.go index 1652597f3..ce7961c1f 100644 --- a/common/step_download_test.go +++ b/common/step_download_test.go @@ -16,7 +16,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - urlhelper "github.com/hashicorp/go-getter/helper/url" + urlhelper "github.com/hashicorp/go-getter/v2/helper/url" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer/tmp" ) diff --git a/go.mod b/go.mod index 458eac26c..d2b8b6ad2 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,8 @@ require ( github.com/hashicorp/go-cty-funcs/encoding v0.0.0-20200203151509-c92509f48b18 github.com/hashicorp/go-cty-funcs/filesystem v0.0.0-20200203151509-c92509f48b18 github.com/hashicorp/go-cty-funcs/uuid v0.0.0-20200203151509-c92509f48b18 - github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da + github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da // indirect + github.com/hashicorp/go-getter/v2 v2.0.0-20200206160058-e2a28063d6e7 github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79 github.com/hashicorp/go-retryablehttp v0.5.2 // indirect diff --git a/go.sum b/go.sum index f68d86128..8c4e7615d 100644 --- a/go.sum +++ b/go.sum @@ -239,6 +239,8 @@ github.com/hashicorp/go-cty-funcs/uuid v0.0.0-20200203151509-c92509f48b18 h1:CxY github.com/hashicorp/go-cty-funcs/uuid v0.0.0-20200203151509-c92509f48b18/go.mod h1:QFbv9KeSic7KIgfOYbUW02G4LxOf3Fh9Ylm4n174LUQ= github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da h1:HAasZmyRrb7/paYuww5RfVwY3wkFpsbMNYwBxOSZquY= github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter/v2 v2.0.0-20200206160058-e2a28063d6e7 h1:ODZKizgWGz4diUEZwCgf8qgIn/D+qVW/JOdVVV/z7k8= +github.com/hashicorp/go-getter/v2 v2.0.0-20200206160058-e2a28063d6e7/go.mod h1:jlmxRRjTpY0KdWrV1Uq38GUVskrjIZUrjOAybo0OArw= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= diff --git a/packer/ui.go b/packer/ui.go index d2f7bdca2..bbbf01d61 100644 --- a/packer/ui.go +++ b/packer/ui.go @@ -15,7 +15,7 @@ import ( "time" "unicode" - getter "github.com/hashicorp/go-getter" + getter "github.com/hashicorp/go-getter/v2" ) var ErrInterrupted = errors.New("interrupted") diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml deleted file mode 100644 index 4fe9176aa..000000000 --- a/vendor/github.com/hashicorp/go-getter/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -sudo: false - -addons: - apt: - sources: - - sourceline: 'ppa:git-core/ppa' - packages: - - git - -language: go - -os: - - linux - - osx - -go: - - "1.11.x" - -before_script: - - go build ./cmd/go-getter - -branches: - only: - - master diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go deleted file mode 100644 index c1ee413b0..000000000 --- 
a/vendor/github.com/hashicorp/go-getter/client_option.go +++ /dev/null @@ -1,46 +0,0 @@ -package getter - -import "context" - -// A ClientOption allows to configure a client -type ClientOption func(*Client) error - -// Configure configures a client with options. -func (c *Client) Configure(opts ...ClientOption) error { - if c.Ctx == nil { - c.Ctx = context.Background() - } - c.Options = opts - for _, opt := range opts { - err := opt(c) - if err != nil { - return err - } - } - // Default decompressor values - if c.Decompressors == nil { - c.Decompressors = Decompressors - } - // Default detector values - if c.Detectors == nil { - c.Detectors = Detectors - } - // Default getter values - if c.Getters == nil { - c.Getters = Getters - } - - for _, getter := range c.Getters { - getter.SetClient(c) - } - return nil -} - -// WithContext allows to pass a context to operation -// in order to be able to cancel a download in progress. -func WithContext(ctx context.Context) func(*Client) error { - return func(c *Client) error { - c.Ctx = ctx - return nil - } -} diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go deleted file mode 100644 index 09e9b6313..000000000 --- a/vendor/github.com/hashicorp/go-getter/get_base.go +++ /dev/null @@ -1,20 +0,0 @@ -package getter - -import "context" - -// getter is our base getter; it regroups -// fields all getters have in common. -type getter struct { - client *Client -} - -func (g *getter) SetClient(c *Client) { g.client = c } - -// Context tries to returns the Contex from the getter's -// client. otherwise context.Background() is returned. -func (g *getter) Context() context.Context { - if g == nil || g.client == nil { - return context.Background() - } - return g.client.Ctx -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go deleted file mode 100644 index 78660839a..000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file.go +++ /dev/null @@ -1,36 +0,0 @@ -package getter - -import ( - "net/url" - "os" -) - -// FileGetter is a Getter implementation that will download a module from -// a file scheme. -type FileGetter struct { - getter - - // Copy, if set to true, will copy data instead of using a symlink. If - // false, attempts to symlink to speed up the operation and to lower the - // disk space usage. If the symlink fails, may attempt to copy on windows. - Copy bool -} - -func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - fi, err := os.Stat(path) - if err != nil { - return 0, err - } - - // Check if the source is a directory. - if fi.IsDir() { - return ClientModeDir, nil - } - - return ClientModeFile, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go deleted file mode 100644 index c3b28ae51..000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build !windows - -package getter - -import ( - "fmt" - "net/url" - "os" - "path/filepath" -) - -func (g *FileGetter) Get(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. 
- if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - return os.Symlink(path, dst) -} - -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a file to be usable. - if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if fi.IsDir() { - return fmt.Errorf("source path must be a file") - } - - _, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - // If we're not copying, just symlink and we're done - if !g.Copy { - return os.Symlink(path, dst) - } - - // Copy - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = Copy(ctx, dstF, srcF) - return err -} diff --git a/vendor/github.com/hashicorp/go-getter/v2/.gitignore b/vendor/github.com/hashicorp/go-getter/v2/.gitignore new file mode 100644 index 000000000..22d0d82f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/.gitignore @@ -0,0 +1 @@ +vendor diff --git a/vendor/github.com/hashicorp/go-getter/v2/LICENSE b/vendor/github.com/hashicorp/go-getter/v2/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/v2/README.md similarity index 95% rename from vendor/github.com/hashicorp/go-getter/README.md rename to vendor/github.com/hashicorp/go-getter/v2/README.md index 3de23c709..bbcd15de9 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/v2/README.md @@ -1,10 +1,10 @@ # go-getter -[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis] +[![CircleCI](https://circleci.com/gh/hashicorp/go-getter/tree/master.svg?style=svg)][circleci] [![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] -[travis]: http://travis-ci.org/hashicorp/go-getter +[circleci]: https://circleci.com/gh/hashicorp/go-getter/tree/master [godocs]: http://godoc.org/github.com/hashicorp/go-getter [appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master @@ -356,3 +356,7 @@ In order to access to GCS, authentication credentials should be provided. More i - gcs::https://www.googleapis.com/storage/v1/bucket - gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip - www.googleapis.com/storage/v1/bucket/foo + +#### GCS Testing + +The tests for `get_gcs.go` require you to have GCP credentials set in your environment. These credentials can have any level of permissions to any project, they just need to exist. This means setting `GOOGLE_APPLICATION_CREDENTIALS="~/path/to/credentials.json"` or `GOOGLE_CREDENTIALS="{stringified-credentials-json}"`. Due to this configuration, `get_gcs_test.go` will fail for external contributors in CircleCI. 
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/v2/appveyor.yml similarity index 100% rename from vendor/github.com/hashicorp/go-getter/appveyor.yml rename to vendor/github.com/hashicorp/go-getter/v2/appveyor.yml diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/v2/checksum.go similarity index 87% rename from vendor/github.com/hashicorp/go-getter/checksum.go rename to vendor/github.com/hashicorp/go-getter/v2/checksum.go index eeccfea9d..d10ab2c0b 100644 --- a/vendor/github.com/hashicorp/go-getter/checksum.go +++ b/vendor/github.com/hashicorp/go-getter/v2/checksum.go @@ -3,6 +3,7 @@ package getter import ( "bufio" "bytes" + "context" "crypto/md5" "crypto/sha1" "crypto/sha256" @@ -93,7 +94,7 @@ func (c *FileChecksum) checksum(source string) error { // *file2 // // see parseChecksumLine for more detail on checksum file parsing -func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) { +func (c *Client) extractChecksum(ctx context.Context, u *url.URL) (*FileChecksum, error) { q := u.Query() v := q.Get("checksum") @@ -115,7 +116,7 @@ func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) { switch checksumType { case "file": - return c.ChecksumFromFile(checksumValue, u) + return c.ChecksumFromFile(ctx, checksumValue, u.Path) default: return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) } @@ -183,15 +184,16 @@ func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) return c, nil } -// ChecksumFromFile will return all the FileChecksums found in file +// ChecksumFromFile will return the first file checksum found in the +// `checksumURL` file that corresponds to the `checksummedPath` path. // -// ChecksumFromFile will try to guess the hashing algorithm based on content -// of checksum file +// ChecksumFromFile will infer the hashing algorithm based on the checksumURL +// file content. // -// ChecksumFromFile will only return checksums for files that match file -// behind src -func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) { - checksumFileURL, err := urlhelper.Parse(checksumFile) +// ChecksumFromFile will only return checksums for files that match +// checksummedPath, which is the object being checksummed. +func (c *Client) ChecksumFromFile(ctx context.Context, checksumURL, checksummedPath string) (*FileChecksum, error) { + checksumFileURL, err := urlhelper.Parse(checksumURL) if err != nil { return nil, err } @@ -202,24 +204,20 @@ func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileCheck } defer os.Remove(tempfile) - c2 := &Client{ - Ctx: c.Ctx, - Getters: c.Getters, - Decompressors: c.Decompressors, - Detectors: c.Detectors, - Pwd: c.Pwd, - Dir: false, - Src: checksumFile, - Dst: tempfile, - ProgressListener: c.ProgressListener, + req := &Request{ + // Pwd: c.Pwd, TODO(adrien): pass pwd ? + Mode: ModeFile, + Src: checksumURL, + Dst: tempfile, + // ProgressListener: c.ProgressListener, TODO(adrien): pass progress bar ? 
} - if err = c2.Get(); err != nil { + if _, err = c.Get(ctx, req); err != nil { return nil, fmt.Errorf( "Error downloading checksum file: %s", err) } - filename := filepath.Base(src.Path) - absPath, err := filepath.Abs(src.Path) + filename := filepath.Base(checksummedPath) + absPath, err := filepath.Abs(checksummedPath) if err != nil { return nil, err } @@ -277,7 +275,7 @@ func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileCheck } } } - return nil, fmt.Errorf("no checksum found in: %s", checksumFile) + return nil, fmt.Errorf("no checksum found in: %s", checksumURL) } // parseChecksumLine takes a line from a checksum file and returns diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/v2/client.go similarity index 64% rename from vendor/github.com/hashicorp/go-getter/client.go rename to vendor/github.com/hashicorp/go-getter/v2/client.go index 007a78ba7..a6a1bf2bd 100644 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ b/vendor/github.com/hashicorp/go-getter/v2/client.go @@ -19,25 +19,6 @@ import ( // Using a client directly allows more fine-grained control over how downloading // is done, as well as customizing the protocols supported. type Client struct { - // Ctx for cancellation - Ctx context.Context - - // Src is the source URL to get. - // - // Dst is the path to save the downloaded thing as. If Dir is set to - // true, then this should be a directory. If the directory doesn't exist, - // it will be created for you. - // - // Pwd is the working directory for detection. If this isn't set, some - // detection may fail. Client will not default pwd to the current - // working directory for security reasons. - Src string - Dst string - Pwd string - - // Mode is the method of download the client will use. See ClientMode - // for documentation. - Mode ClientMode // Detectors is the list of detectors that are tried on the source. // If this is nil, then the default Detectors will be used. @@ -50,78 +31,66 @@ type Client struct { // Getters is the map of protocols supported by this client. If this // is nil, then the default Getters variable will be used. Getters map[string]Getter +} - // Dir, if true, tells the Client it is downloading a directory (versus - // a single file). This distinction is necessary since filenames and - // directory names follow the same format so disambiguating is impossible - // without knowing ahead of time. - // - // WARNING: deprecated. If Mode is set, that will take precedence. - Dir bool - - // ProgressListener allows to track file downloads. - // By default a no op progress listener is used. - ProgressListener ProgressTracker - - Options []ClientOption +// GetResult is the result of a Client.Get +type GetResult struct { + // Local destination of the gotten object. + Dst string } // Get downloads the configured source to the destination. 
-func (c *Client) Get() error { - if err := c.Configure(c.Options...); err != nil { - return err +func (c *Client) Get(ctx context.Context, req *Request) (*GetResult, error) { + if err := c.configure(); err != nil { + return nil, err } // Store this locally since there are cases we swap this - mode := c.Mode - if mode == ClientModeInvalid { - if c.Dir { - mode = ClientModeDir - } else { - mode = ClientModeFile - } + if req.Mode == ModeInvalid { + req.Mode = ModeAny } - src, err := Detect(c.Src, c.Pwd, c.Detectors) + var err error + req.Src, err = Detect(req.Src, req.Pwd, c.Detectors) if err != nil { - return err + return nil, err } + var force string // Determine if we have a forced protocol, i.e. "git::http://..." - force, src := getForcedGetter(src) + force, req.Src = getForcedGetter(req.Src) // If there is a subdir component, then we download the root separately // and then copy over the proper subdir. - var realDst string - dst := c.Dst - src, subDir := SourceDirSubdir(src) + var realDst, subDir string + req.Src, subDir = SourceDirSubdir(req.Src) if subDir != "" { td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { - return err + return nil, err } defer tdcloser.Close() - realDst = dst - dst = td + realDst = req.Dst + req.Dst = td } - u, err := urlhelper.Parse(src) + req.u, err = urlhelper.Parse(req.Src) if err != nil { - return err + return nil, err } if force == "" { - force = u.Scheme + force = req.u.Scheme } g, ok := c.Getters[force] if !ok { - return fmt.Errorf( + return nil, fmt.Errorf( "download not supported for scheme '%s'", force) } // We have magic query parameters that we use to signal different features - q := u.Query() + q := req.u.Query() // Determine if we have an archive type archiveV := q.Get("archive") @@ -129,7 +98,7 @@ func (c *Client) Get() error { // Delete the paramter since it is a magic parameter we don't // want to pass on to the Getter q.Del("archive") - u.RawQuery = q.Encode() + req.u.RawQuery = q.Encode() // If we can parse the value as a bool and it is false, then // set the archive to "-" which should never map to a decompressor @@ -141,7 +110,7 @@ func (c *Client) Get() error { // We don't appear to... but is it part of the filename? matchingLen := 0 for k := range c.Decompressors { - if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { + if strings.HasSuffix(req.u.Path, "."+k) && len(k) > matchingLen { archiveV = k matchingLen = len(k) } @@ -159,73 +128,73 @@ func (c *Client) Get() error { // this at the end of everything. td, err := ioutil.TempDir("", "getter") if err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "Error creating temporary directory for archive: %s", err) } defer os.RemoveAll(td) // Swap the download directory to be our temporary path and // store the old values. - decompressDst = dst - decompressDir = mode != ClientModeFile - dst = filepath.Join(td, "archive") - mode = ClientModeFile + decompressDst = req.Dst + decompressDir = req.Mode != ModeFile + req.Dst = filepath.Join(td, "archive") + req.Mode = ModeFile } // Determine checksum if we have one - checksum, err := c.extractChecksum(u) + checksum, err := c.extractChecksum(ctx, req.u) if err != nil { - return fmt.Errorf("invalid checksum: %s", err) + return nil, fmt.Errorf("invalid checksum: %s", err) } // Delete the query parameter if we have it. 
q.Del("checksum") - u.RawQuery = q.Encode() + req.u.RawQuery = q.Encode() - if mode == ClientModeAny { + if req.Mode == ModeAny { // Ask the getter which client mode to use - mode, err = g.ClientMode(u) + req.Mode, err = g.Mode(ctx, req.u) if err != nil { - return err + return nil, err } // Destination is the base name of the URL path in "any" mode when // a file source is detected. - if mode == ClientModeFile { - filename := filepath.Base(u.Path) + if req.Mode == ModeFile { + filename := filepath.Base(req.u.Path) // Determine if we have a custom file name if v := q.Get("filename"); v != "" { // Delete the query parameter if we have it. q.Del("filename") - u.RawQuery = q.Encode() + req.u.RawQuery = q.Encode() filename = v } - dst = filepath.Join(dst, filename) + req.Dst = filepath.Join(req.Dst, filename) } } // If we're not downloading a directory, then just download the file // and return. - if mode == ClientModeFile { + if req.Mode == ModeFile { getFile := true if checksum != nil { - if err := checksum.checksum(dst); err == nil { + if err := checksum.checksum(req.Dst); err == nil { // don't get the file if the checksum of dst is correct getFile = false } } if getFile { - err := g.GetFile(dst, u) + err := g.GetFile(ctx, req) if err != nil { - return err + return nil, err } if checksum != nil { - if err := checksum.checksum(dst); err != nil { - return err + if err := checksum.checksum(req.Dst); err != nil { + return nil, err } } } @@ -233,25 +202,25 @@ func (c *Client) Get() error { if decompressor != nil { // We have a decompressor, so decompress the current destination // into the final destination with the proper mode. - err := decompressor.Decompress(decompressDst, dst, decompressDir) + err := decompressor.Decompress(decompressDst, req.Dst, decompressDir) if err != nil { - return err + return nil, err } // Swap the information back - dst = decompressDst + req.Dst = decompressDst if decompressDir { - mode = ClientModeAny + req.Mode = ModeAny } else { - mode = ClientModeFile + req.Mode = ModeFile } } // We check the dir value again because it can be switched back // if we were unarchiving. If we're still only Get-ing a file, then // we're done. - if mode == ClientModeFile { - return nil + if req.Mode == ModeFile { + return &GetResult{req.Dst}, nil } } @@ -263,36 +232,36 @@ func (c *Client) Get() error { // If we're getting a directory, then this is an error. You cannot // checksum a directory. TODO: test if checksum != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "checksum cannot be specified for directory download") } // We're downloading a directory, which might require a bit more work // if we're specifying a subdir. 
- err := g.Get(dst, u) + err := g.Get(ctx, req) if err != nil { - err = fmt.Errorf("error downloading '%s': %s", src, err) - return err + err = fmt.Errorf("error downloading '%s': %s", req.Src, err) + return nil, err } } // If we have a subdir, copy that over if subDir != "" { if err := os.RemoveAll(realDst); err != nil { - return err + return nil, err } if err := os.MkdirAll(realDst, 0755); err != nil { - return err + return nil, err } // Process any globs - subDir, err := SubdirGlob(dst, subDir) + subDir, err := SubdirGlob(req.Dst, subDir) if err != nil { - return err + return nil, err } - return copyDir(c.Ctx, realDst, subDir, false) + return &GetResult{realDst}, copyDir(ctx, realDst, subDir, false) } - return nil + return &GetResult{req.Dst}, nil } diff --git a/vendor/github.com/hashicorp/go-getter/v2/client_option.go b/vendor/github.com/hashicorp/go-getter/v2/client_option.go new file mode 100644 index 000000000..567f3c8ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/client_option.go @@ -0,0 +1,22 @@ +package getter + +// configure configures a client with options. +func (c *Client) configure() error { + // Default decompressor values + if c.Decompressors == nil { + c.Decompressors = Decompressors + } + // Default detector values + if c.Detectors == nil { + c.Detectors = Detectors + } + // Default getter values + if c.Getters == nil { + c.Getters = Getters + } + + for _, getter := range c.Getters { + getter.SetClient(c) + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/v2/client_option_progress.go similarity index 71% rename from vendor/github.com/hashicorp/go-getter/client_option_progress.go rename to vendor/github.com/hashicorp/go-getter/v2/client_option_progress.go index 9b185f71d..1ec9aa1e9 100644 --- a/vendor/github.com/hashicorp/go-getter/client_option_progress.go +++ b/vendor/github.com/hashicorp/go-getter/v2/client_option_progress.go @@ -4,18 +4,6 @@ import ( "io" ) -// WithProgress allows for a user to track -// the progress of a download. -// For example by displaying a progress bar with -// current download. -// Not all getters have progress support yet. -func WithProgress(pl ProgressTracker) func(*Client) error { - return func(c *Client) error { - c.ProgressListener = pl - return nil - } -} - // ProgressTracker allows to track the progress of downloads. 
type ProgressTracker interface { // TrackProgress should be called when diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/v2/common.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/common.go rename to vendor/github.com/hashicorp/go-getter/v2/common.go diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/v2/copy_dir.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/copy_dir.go rename to vendor/github.com/hashicorp/go-getter/v2/copy_dir.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/v2/decompress.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_bzip2.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_bzip2.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_bzip2.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_gzip.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_gzip.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_gzip.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_tar.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_tar.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_tar.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_tbz2.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_tbz2.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_tbz2.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_testing.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_testing.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_testing.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_tgz.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_tgz.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_tgz.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_txz.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_txz.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_txz.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_xz.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/decompress_xz.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_xz.go diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/v2/decompress_zip.go similarity index 98% rename from vendor/github.com/hashicorp/go-getter/decompress_zip.go rename to vendor/github.com/hashicorp/go-getter/v2/decompress_zip.go index 0830f7914..650a852f3 100644 --- 
a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ b/vendor/github.com/hashicorp/go-getter/v2/decompress_zip.go @@ -8,8 +8,8 @@ import ( "path/filepath" ) -// ZipDecompressor is an implementation of Decompressor that can -// decompress zip files. +// ZipDecompressor is an implementation of Decompressor that can decompress zip +// files. type ZipDecompressor struct{} func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/v2/detect.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect.go rename to vendor/github.com/hashicorp/go-getter/v2/detect.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/v2/detect_bitbucket.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_bitbucket.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_bitbucket.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/v2/detect_file.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_file.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_file.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/vendor/github.com/hashicorp/go-getter/v2/detect_gcs.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_gcs.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_gcs.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/v2/detect_git.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_git.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_git.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/v2/detect_github.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_github.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_github.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/v2/detect_s3.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_s3.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_s3.go diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/v2/detect_ssh.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/detect_ssh.go rename to vendor/github.com/hashicorp/go-getter/v2/detect_ssh.go diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/v2/folder_storage.go similarity index 89% rename from vendor/github.com/hashicorp/go-getter/folder_storage.go rename to vendor/github.com/hashicorp/go-getter/v2/folder_storage.go index 647ccf459..891eda956 100644 --- a/vendor/github.com/hashicorp/go-getter/folder_storage.go +++ b/vendor/github.com/hashicorp/go-getter/v2/folder_storage.go @@ -1,6 +1,7 @@ package getter import ( + "context" "crypto/md5" "encoding/hex" "fmt" @@ -39,7 +40,7 @@ func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { } // Get implements Storage.Get -func (s *FolderStorage) Get(key string, source string, update bool) error { +func (s *FolderStorage) Get(ctx context.Context, key string, source string, update bool) error { dir := s.dir(key) if !update { if _, err := os.Stat(dir); err == 
nil { @@ -54,7 +55,8 @@ func (s *FolderStorage) Get(key string, source string, update bool) error { } // Get the source. This always forces an update. - return Get(dir, source) + _, err := Get(ctx, dir, source) + return err } // dir returns the directory name internally that we'll use to map to diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/v2/get.go similarity index 81% rename from vendor/github.com/hashicorp/go-getter/get.go rename to vendor/github.com/hashicorp/go-getter/v2/get.go index c233763c6..c82f52f05 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get.go @@ -13,6 +13,7 @@ package getter import ( "bytes" + "context" "fmt" "net/url" "os/exec" @@ -31,16 +32,16 @@ type Getter interface { // The directory may already exist (if we're updating). If it is in a // format that isn't understood, an error should be returned. Get shouldn't // simply nuke the directory. - Get(string, *url.URL) error + Get(context.Context, *Request) error // GetFile downloads the give URL into the given path. The URL must // reference a single file. If possible, the Getter should check if // the remote end contains the same file and no-op this operation. - GetFile(string, *url.URL) error + GetFile(context.Context, *Request) error - // ClientMode returns the mode based on the given URL. This is used to + // Mode returns the mode based on the given URL. This is used to // allow clients to let the getters decide which mode to use. - ClientMode(*url.URL) (ClientMode, error) + Mode(context.Context, *url.URL) (Mode, error) // SetClient allows a getter to know it's client // in order to access client's Get functions or @@ -59,6 +60,12 @@ var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) // httpClient is the default client to be used by HttpGetters. var httpClient = cleanhttp.DefaultClient() +var DefaultClient = &Client{ + Getters: Getters, + Detectors: Detectors, + Decompressors: Decompressors, +} + func init() { httpGetter := &HttpGetter{ Netrc: true, @@ -80,13 +87,13 @@ func init() { // // src is a URL, whereas dst is always just a file path to a folder. This // folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: true, - Options: opts, - }).Get() +func Get(ctx context.Context, dst, src string) (*GetResult, error) { + req := &Request{ + Src: src, + Dst: dst, + Mode: ModeDir, + } + return DefaultClient.Get(ctx, req) } // GetAny downloads a URL into the given destination. Unlike Get or @@ -95,24 +102,24 @@ func Get(dst, src string, opts ...ClientOption) error { // dst must be a directory. If src is a file, it will be downloaded // into dst with the basename of the URL. If src is a directory or // archive, it will be unpacked directly into dst. -func GetAny(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Mode: ClientModeAny, - Options: opts, - }).Get() +func GetAny(ctx context.Context, dst, src string) (*GetResult, error) { + req := &Request{ + Src: src, + Dst: dst, + Mode: ModeAny, + } + return DefaultClient.Get(ctx, req) } // GetFile downloads the file specified by src into the path specified by // dst. 
-func GetFile(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: false, - Options: opts, - }).Get() +func GetFile(ctx context.Context, dst, src string) (*GetResult, error) { + req := &Request{ + Src: src, + Dst: dst, + Mode: ModeFile, + } + return DefaultClient.Get(ctx, req) } // getRunCommand is a helper that will run a command and capture the output diff --git a/vendor/github.com/hashicorp/go-getter/v2/get_base.go b/vendor/github.com/hashicorp/go-getter/v2/get_base.go new file mode 100644 index 000000000..4a9ca733f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/get_base.go @@ -0,0 +1,9 @@ +package getter + +// getter is our base getter; it regroups +// fields all getters have in common. +type getter struct { + client *Client +} + +func (g *getter) SetClient(c *Client) { g.client = c } diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/v2/get_file.go similarity index 54% rename from vendor/github.com/hashicorp/go-getter/get_file_windows.go rename to vendor/github.com/hashicorp/go-getter/v2/get_file.go index 24f1acb17..f11d13ecf 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_file.go @@ -1,24 +1,44 @@ -// +build windows - package getter import ( + "context" "fmt" "net/url" "os" - "os/exec" "path/filepath" - "strings" - "syscall" ) -func (g *FileGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() +// FileGetter is a Getter implementation that will download a module from +// a file scheme. +type FileGetter struct { + getter +} + +func (g *FileGetter) Mode(ctx context.Context, u *url.URL) (Mode, error) { path := u.Path if u.RawPath != "" { path = u.RawPath } + fi, err := os.Stat(path) + if err != nil { + return 0, err + } + + // Check if the source is a directory. + if fi.IsDir() { + return ModeDir, nil + } + + return ModeFile, nil +} + +func (g *FileGetter) Get(ctx context.Context, req *Request) error { + path := req.u.Path + if req.u.RawPath != "" { + path = req.u.RawPath + } + // The source path must exist and be a directory to be usable. 
if fi, err := os.Stat(path); err != nil { return fmt.Errorf("source path error: %s", err) @@ -26,11 +46,16 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { return fmt.Errorf("source path must be a directory") } - fi, err := os.Lstat(dst) + fi, err := os.Lstat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } + if req.Inplace { + req.Dst = path + return nil + } + // If the destination already exists, it must be a symlink if err == nil { mode := fi.Mode() @@ -39,42 +64,38 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { } // Remove the destination - if err := os.Remove(dst); err != nil { + if err := os.Remove(req.Dst); err != nil { return err } } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(req.Dst), 0755); err != nil { return err } - sourcePath := toBackslash(path) - - // Use mklink to create a junction point - output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) - } - - return nil + return SymlinkAny(path, req.Dst) } -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - path := u.Path - if u.RawPath != "" { - path = u.RawPath +func (g *FileGetter) GetFile(ctx context.Context, req *Request) error { + path := req.u.Path + if req.u.RawPath != "" { + path = req.u.RawPath } - // The source path must exist and be a directory to be usable. + // The source path must exist and be a file to be usable. if fi, err := os.Stat(path); err != nil { return fmt.Errorf("source path error: %s", err) } else if fi.IsDir() { return fmt.Errorf("source path must be a file") } - _, err := os.Lstat(dst) + if req.Inplace { + req.Dst = path + return nil + } + + _, err := os.Lstat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } @@ -82,19 +103,19 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { // If the destination already exists, it must be a symlink if err == nil { // Remove the destination - if err := os.Remove(dst); err != nil { + if err := os.Remove(req.Dst); err != nil { return err } } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(req.Dst), 0755); err != nil { return err } // If we're not copying, just symlink and we're done - if !g.Copy { - if err = os.Symlink(path, dst); err == nil { + if !req.Copy { + if err = os.Symlink(path, req.Dst); err == nil { return err } lerr, ok := err.(*os.LinkError) @@ -102,8 +123,9 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { return err } switch lerr.Err { - case syscall.ERROR_PRIVILEGE_NOT_HELD: - // no symlink privilege, let's + case ErrUnauthorized: + // On windows this means we don't have + // symlink privilege, let's // fallback to a copy to avoid an error. break default: @@ -118,7 +140,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } defer srcF.Close() - dstF, err := os.Create(dst) + dstF, err := os.Create(req.Dst) if err != nil { return err } @@ -127,10 +149,3 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { _, err = Copy(ctx, dstF, srcF) return err } - -// toBackslash returns the result of replacing each slash character -// in path with a backslash ('\') character. Multiple separators are -// replaced by multiple backslashes. 
-func toBackslash(path string) string { - return strings.Replace(path, "/", "\\", -1) -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/v2/get_file_copy.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/get_file_copy.go rename to vendor/github.com/hashicorp/go-getter/v2/get_file_copy.go diff --git a/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink.go b/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink.go new file mode 100644 index 000000000..12d296aca --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink.go @@ -0,0 +1,10 @@ +// +build !windows + +package getter + +import ( + "os" +) + +var ErrUnauthorized = os.ErrPermission +var SymlinkAny = os.Symlink diff --git a/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink_windows.go b/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink_windows.go new file mode 100644 index 000000000..dd0b7fe84 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/get_file_symlink_windows.go @@ -0,0 +1,21 @@ +package getter + +import ( + "fmt" + "os/exec" + "path/filepath" + "syscall" +) + +func SymlinkAny(oldname, newname string) error { + sourcePath := filepath.FromSlash(oldname) + + // Use mklink to create a junction point + output, err := exec.Command("cmd", "/c", "mklink", "/J", newname, sourcePath).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to run mklink %v %v: %v %q", newname, sourcePath, err, output) + } + return nil +} + +var ErrUnauthorized = syscall.ERROR_PRIVILEGE_NOT_HELD diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/v2/get_gcs.go similarity index 82% rename from vendor/github.com/hashicorp/go-getter/get_gcs.go rename to vendor/github.com/hashicorp/go-getter/v2/get_gcs.go index 6faa70f4f..a1c6409c1 100644 --- a/vendor/github.com/hashicorp/go-getter/get_gcs.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_gcs.go @@ -18,8 +18,7 @@ type GCSGetter struct { getter } -func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { - ctx := g.Context() +func (g *GCSGetter) Mode(ctx context.Context, u *url.URL) (Mode, error) { // Parse URL bucket, object, err := g.parseURL(u) @@ -43,41 +42,39 @@ func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { } if strings.HasSuffix(obj.Name, "/") { // A directory matched the prefix search, so this must be a directory - return ClientModeDir, nil + return ModeDir, nil } else if obj.Name != object { // A file matched the prefix search and doesn't have the same name // as the query, so this must be a directory - return ClientModeDir, nil + return ModeDir, nil } } // There are no directories or subdirectories, and if a match was returned, // it was exactly equal to the prefix search. 
So return File mode - return ClientModeFile, nil + return ModeFile, nil } -func (g *GCSGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - +func (g *GCSGetter) Get(ctx context.Context, req *Request) error { // Parse URL - bucket, object, err := g.parseURL(u) + bucket, object, err := g.parseURL(req.u) if err != nil { return err } // Remove destination if it already exists - _, err = os.Stat(dst) + _, err = os.Stat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // Remove the destination - if err := os.RemoveAll(dst); err != nil { + if err := os.RemoveAll(req.Dst); err != nil { return err } } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(req.Dst), 0755); err != nil { return err } @@ -103,7 +100,7 @@ func (g *GCSGetter) Get(dst string, u *url.URL) error { if err != nil { return err } - objDst = filepath.Join(dst, objDst) + objDst = filepath.Join(req.Dst, objDst) // Download the matching object. err = g.getObject(ctx, client, objDst, bucket, obj.Name) if err != nil { @@ -114,11 +111,9 @@ func (g *GCSGetter) Get(dst string, u *url.URL) error { return nil } -func (g *GCSGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - +func (g *GCSGetter) GetFile(ctx context.Context, req *Request) error { // Parse URL - bucket, object, err := g.parseURL(u) + bucket, object, err := g.parseURL(req.u) if err != nil { return err } @@ -127,7 +122,7 @@ func (g *GCSGetter) GetFile(dst string, u *url.URL) error { if err != nil { return err } - return g.getObject(ctx, client, dst, bucket, object) + return g.getObject(ctx, client, req.Dst, bucket, object) } func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error { diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/v2/get_git.go similarity index 76% rename from vendor/github.com/hashicorp/go-getter/get_git.go rename to vendor/github.com/hashicorp/go-getter/v2/get_git.go index 67e8b2f49..4a0b044f7 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_git.go @@ -1,6 +1,7 @@ package getter import ( + "bytes" "context" "encoding/base64" "fmt" @@ -9,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strconv" "strings" @@ -24,12 +26,13 @@ type GitGetter struct { getter } -func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil +var defaultBranchRegexp = regexp.MustCompile(`\s->\sorigin/(.*)`) + +func (g *GitGetter) Mode(_ context.Context, u *url.URL) (Mode, error) { + return ModeDir, nil } -func (g *GitGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() +func (g *GitGetter) Get(ctx context.Context, req *Request) error { if _, err := exec.LookPath("git"); err != nil { return fmt.Errorf("git must be available and on the PATH") } @@ -37,7 +40,10 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { // The port number must be parseable as an integer. If not, the user // was probably trying to use a scp-style address, in which case the // ssh:// prefix must be removed to indicate that. - if portStr := u.Port(); portStr != "" { + // + // This is not necessary in versions of Go which have patched + // CVE-2019-14809 (e.g. 
Go 1.12.8+) + if portStr := req.u.Port(); portStr != "" { if _, err := strconv.ParseUint(portStr, 10, 16); err != nil { return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr) } @@ -46,7 +52,7 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { // Extract some query parameters we use var ref, sshKey string var depth int - q := u.Query() + q := req.u.Query() if len(q) > 0 { ref = q.Get("ref") q.Del("ref") @@ -60,9 +66,9 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { q.Del("depth") // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() + var newU url.URL = *req.u + req.u = &newU + req.u.RawQuery = q.Encode() } var sshKeyFile string @@ -100,14 +106,14 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { } // Clone or update the repository - _, err := os.Stat(dst) + _, err := os.Stat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } if err == nil { - err = g.update(ctx, dst, sshKeyFile, ref, depth) + err = g.update(ctx, req.Dst, sshKeyFile, ref, depth) } else { - err = g.clone(ctx, dst, sshKeyFile, u, depth) + err = g.clone(ctx, sshKeyFile, depth, req) } if err != nil { return err @@ -115,18 +121,18 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { // Next: check out the proper tag/branch if it is specified, and checkout if ref != "" { - if err := g.checkout(dst, ref); err != nil { + if err := g.checkout(req.Dst, ref); err != nil { return err } } // Lastly, download any/all submodules. - return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) + return g.fetchSubmodules(ctx, req.Dst, sshKeyFile, depth) } // GetFile for Git doesn't support updating at this time. It will download // the file every time. -func (g *GitGetter) GetFile(dst string, u *url.URL) error { +func (g *GitGetter) GetFile(ctx context.Context, req *Request) error { td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err @@ -135,22 +141,26 @@ func (g *GitGetter) GetFile(dst string, u *url.URL) error { // Get the filename, and strip the filename from the URL so we can // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.Dir(u.Path) + filename := filepath.Base(req.u.Path) + req.u.Path = filepath.Dir(req.u.Path) + dst := req.Dst + req.Dst = td // Get the full repository - if err := g.Get(td, u); err != nil { + if err := g.Get(ctx, req); err != nil { return err } // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + req.u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) if err != nil { return err } - fg := &FileGetter{Copy: true} - return fg.GetFile(dst, u) + fg := &FileGetter{} + req.Copy = true + req.Dst = dst + return fg.GetFile(ctx, req) } func (g *GitGetter) checkout(dst string, ref string) error { @@ -159,14 +169,14 @@ func (g *GitGetter) checkout(dst string, ref string) error { return getRunCommand(cmd) } -func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error { +func (g *GitGetter) clone(ctx context.Context, sshKeyFile string, depth int, req *Request) error { args := []string{"clone"} if depth > 0 { args = append(args, "--depth", strconv.Itoa(depth)) } - args = append(args, u.String(), dst) + args = append(args, req.u.String(), req.Dst) cmd := exec.CommandContext(ctx, "git", args...) 
setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) @@ -179,10 +189,10 @@ func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, dep cmd.Dir = dst if getRunCommand(cmd) != nil { - // Not a branch, switch to master. This will also catch non-existent - // branches, in which case we want to switch to master and then - // checkout the proper branch later. - ref = "master" + // Not a branch, switch to default branch. This will also catch + // non-existent branches, in which case we want to switch to default + // and then checkout the proper branch later. + ref = findDefaultBranch(dst) } // We have to be on a branch to pull @@ -213,6 +223,22 @@ func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, return getRunCommand(cmd) } +// findDefaultBranch checks the repo's origin remote for its default branch +// (generally "master"). "master" is returned if an origin default branch +// can't be determined. +func findDefaultBranch(dst string) string { + var stdoutbuf bytes.Buffer + cmd := exec.Command("git", "branch", "-r", "--points-at", "refs/remotes/origin/HEAD") + cmd.Dir = dst + cmd.Stdout = &stdoutbuf + err := cmd.Run() + matches := defaultBranchRegexp.FindStringSubmatch(stdoutbuf.String()) + if err != nil || matches == nil { + return "master" + } + return matches[len(matches)-1] +} + // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/v2/get_hg.go similarity index 77% rename from vendor/github.com/hashicorp/go-getter/get_hg.go rename to vendor/github.com/hashicorp/go-getter/v2/get_hg.go index 290649c91..4532113e4 100644 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_hg.go @@ -19,17 +19,16 @@ type HgGetter struct { getter } -func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil +func (g *HgGetter) Mode(ctx context.Context, _ *url.URL) (Mode, error) { + return ModeDir, nil } -func (g *HgGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() +func (g *HgGetter) Get(ctx context.Context, req *Request) error { if _, err := exec.LookPath("hg"); err != nil { return fmt.Errorf("hg must be available and on the PATH") } - newURL, err := urlhelper.Parse(u.String()) + newURL, err := urlhelper.Parse(req.u.String()) if err != nil { return err } @@ -48,26 +47,26 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { newURL.RawQuery = q.Encode() } - _, err = os.Stat(dst) + _, err = os.Stat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } if err != nil { - if err := g.clone(dst, newURL); err != nil { + if err := g.clone(req.Dst, newURL); err != nil { return err } } - if err := g.pull(dst, newURL); err != nil { + if err := g.pull(req.Dst, newURL); err != nil { return err } - return g.update(ctx, dst, newURL, rev) + return g.update(ctx, req.Dst, newURL, rev) } // GetFile for Hg doesn't support updating at this time. It will download // the file every time. -func (g *HgGetter) GetFile(dst string, u *url.URL) error { +func (g *HgGetter) GetFile(ctx context.Context, req *Request) error { // Create a temporary directory to store the full source. This has to be // a non-existent directory. 
td, tdcloser, err := safetemp.Dir("", "getter") @@ -78,27 +77,31 @@ func (g *HgGetter) GetFile(dst string, u *url.URL) error { // Get the filename, and strip the filename from the URL so we can // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.ToSlash(filepath.Dir(u.Path)) + filename := filepath.Base(req.u.Path) + req.u.Path = filepath.Dir(req.u.Path) + dst := req.Dst + req.Dst = td // If we're on Windows, we need to set the host to "localhost" for hg if runtime.GOOS == "windows" { - u.Host = "localhost" + req.u.Host = "localhost" } // Get the full repository - if err := g.Get(td, u); err != nil { + if err := g.Get(ctx, req); err != nil { return err } // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + req.u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) if err != nil { return err } - fg := &FileGetter{Copy: true, getter: g.getter} - return fg.GetFile(dst, u) + fg := &FileGetter{} + req.Copy = true + req.Dst = dst + return fg.GetFile(ctx, req) } func (g *HgGetter) clone(dst string, u *url.URL) error { diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/v2/get_http.go similarity index 76% rename from vendor/github.com/hashicorp/go-getter/get_http.go rename to vendor/github.com/hashicorp/go-getter/v2/get_http.go index 7c4541c6e..27671b32d 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_http.go @@ -9,7 +9,6 @@ import ( "net/url" "os" "path/filepath" - "strconv" "strings" safetemp "github.com/hashicorp/go-safetemp" @@ -53,22 +52,21 @@ type HttpGetter struct { Header http.Header } -func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { +func (g *HttpGetter) Mode(ctx context.Context, u *url.URL) (Mode, error) { if strings.HasSuffix(u.Path, "/") { - return ClientModeDir, nil + return ModeDir, nil } - return ClientModeFile, nil + return ModeFile, nil } -func (g *HttpGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() +func (g *HttpGetter) Get(ctx context.Context, req *Request) error { // Copy the URL so we can modify it - var newU url.URL = *u - u = &newU + var newU url.URL = *req.u + req.u = &newU if g.Netrc { // Add auth from netrc if we can - if err := addAuthFromNetrc(u); err != nil { + if err := addAuthFromNetrc(req.u); err != nil { return err } } @@ -78,18 +76,20 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } // Add terraform-get to the parameter. - q := u.Query() + q := req.u.Query() q.Add("terraform-get", "1") - u.RawQuery = q.Encode() + req.u.RawQuery = q.Encode() // Get the URL - req, err := http.NewRequest("GET", u.String(), nil) + httpReq, err := http.NewRequest("GET", req.u.String(), nil) if err != nil { return err } - req.Header = g.Header - resp, err := g.Client.Do(req) + if g.Header != nil { + httpReq.Header = g.Header.Clone() + } + resp, err := g.Client.Do(httpReq) if err != nil { return err } @@ -116,33 +116,38 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { // If there is a subdir component, then we download the root separately // into a temporary directory, then copy over the proper subdir. source, subDir := SourceDirSubdir(source) - if subDir == "" { - var opts []ClientOption - if g.client != nil { - opts = g.client.Options - } - return Get(dst, source, opts...) 
+ req = &Request{ + Mode: ModeDir, + Src: source, + Dst: req.Dst, + } + if subDir == "" { + _, err = DefaultClient.Get(ctx, req) + return err } - // We have a subdir, time to jump some hoops - return g.getSubdir(ctx, dst, source, subDir) + return g.getSubdir(ctx, req.Dst, source, subDir) } -func (g *HttpGetter) GetFile(dst string, src *url.URL) error { - ctx := g.Context() +// GetFile fetches the file from src and stores it at dst. +// If the server supports Accept-Range, HttpGetter will attempt a range +// request. This means it is the caller's responsibility to ensure that an +// older version of the destination file does not exist, else it will be either +// falsely identified as being replaced, or corrupted with extra bytes +// appended. +func (g *HttpGetter) GetFile(ctx context.Context, req *Request) error { if g.Netrc { // Add auth from netrc if we can - if err := addAuthFromNetrc(src); err != nil { + if err := addAuthFromNetrc(req.u); err != nil { return err } } - // Create all the parent directories if needed - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(req.Dst), 0755); err != nil { return err } - f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) + f, err := os.OpenFile(req.Dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) if err != nil { return err } @@ -157,26 +162,25 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { // We first make a HEAD request so we can check // if the server supports range queries. If the server/URL doesn't // support HEAD requests, we just fall back to GET. - req, err := http.NewRequest("HEAD", src.String(), nil) + httpReq, err := http.NewRequest("HEAD", req.u.String(), nil) if err != nil { return err } if g.Header != nil { - req.Header = g.Header + httpReq.Header = g.Header.Clone() } - headResp, err := g.Client.Do(req) - if err == nil && headResp != nil { + headResp, err := g.Client.Do(httpReq) + if err == nil { headResp.Body.Close() if headResp.StatusCode == 200 { // If the HEAD request succeeded, then attempt to set the range // query if we can. 
- if headResp.Header.Get("Accept-Ranges") == "bytes" { + if headResp.Header.Get("Accept-Ranges") == "bytes" && headResp.ContentLength >= 0 { if fi, err := f.Stat(); err == nil { - if _, err = f.Seek(0, os.SEEK_END); err == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + if _, err = f.Seek(0, io.SeekEnd); err == nil { currentFileSize = fi.Size() - totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) - if currentFileSize >= totalFileSize { + httpReq.Header.Set("Range", fmt.Sprintf("bytes=%d-", currentFileSize)) + if currentFileSize >= headResp.ContentLength { // file already present return nil } @@ -185,9 +189,9 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { } } } - req.Method = "GET" + httpReq.Method = "GET" - resp, err := g.Client.Do(req) + resp, err := g.Client.Do(httpReq) if err != nil { return err } @@ -201,10 +205,10 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { body := resp.Body - if g.client != nil && g.client.ProgressListener != nil { + if req.ProgressListener != nil { // track download - fn := filepath.Base(src.EscapedPath()) - body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) + fn := filepath.Base(req.u.EscapedPath()) + body = req.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) } defer body.Close() @@ -226,12 +230,8 @@ func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) } defer tdcloser.Close() - var opts []ClientOption - if g.client != nil { - opts = g.client.Options - } // Download that into the given directory - if err := Get(td, source, opts...); err != nil { + if _, err := Get(ctx, td, source); err != nil { return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/v2/get_mock.go similarity index 60% rename from vendor/github.com/hashicorp/go-getter/get_mock.go rename to vendor/github.com/hashicorp/go-getter/v2/get_mock.go index e2a98ea28..26ea38143 100644 --- a/vendor/github.com/hashicorp/go-getter/get_mock.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_mock.go @@ -1,6 +1,7 @@ package getter import ( + "context" "net/url" ) @@ -23,32 +24,32 @@ type MockGetter struct { GetFileErr error } -func (g *MockGetter) Get(dst string, u *url.URL) error { +func (g *MockGetter) Get(ctx context.Context, req *Request) error { g.GetCalled = true - g.GetDst = dst - g.GetURL = u + g.GetDst = req.Dst + g.GetURL = req.u if g.Proxy != nil { - return g.Proxy.Get(dst, u) + return g.Proxy.Get(ctx, req) } return g.GetErr } -func (g *MockGetter) GetFile(dst string, u *url.URL) error { +func (g *MockGetter) GetFile(ctx context.Context, req *Request) error { g.GetFileCalled = true - g.GetFileDst = dst - g.GetFileURL = u + g.GetFileDst = req.Dst + g.GetFileURL = req.u if g.Proxy != nil { - return g.Proxy.GetFile(dst, u) + return g.Proxy.GetFile(ctx, req) } return g.GetFileErr } -func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { +func (g *MockGetter) Mode(ctx context.Context, u *url.URL) (Mode, error) { if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { - return ClientModeDir, nil + return ModeDir, nil } - return ClientModeFile, nil + return ModeFile, nil } diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/v2/get_s3.go similarity index 86% rename from vendor/github.com/hashicorp/go-getter/get_s3.go rename to 
vendor/github.com/hashicorp/go-getter/v2/get_s3.go index 93eeb0b81..77d8fb317 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/v2/get_s3.go @@ -22,7 +22,7 @@ type S3Getter struct { getter } -func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { +func (g *S3Getter) Mode(ctx context.Context, u *url.URL) (Mode, error) { // Parse URL region, bucket, path, _, creds, err := g.parseUrl(u) if err != nil { @@ -47,48 +47,47 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { for _, o := range resp.Contents { // Use file mode on exact match. if *o.Key == path { - return ClientModeFile, nil + return ModeFile, nil } // Use dir mode if child keys are found. if strings.HasPrefix(*o.Key, path+"/") { - return ClientModeDir, nil + return ModeDir, nil } } // There was no match, so just return file mode. The download is going // to fail but we will let S3 return the proper error later. - return ClientModeFile, nil + return ModeFile, nil } -func (g *S3Getter) Get(dst string, u *url.URL) error { - ctx := g.Context() +func (g *S3Getter) Get(ctx context.Context, req *Request) error { // Parse URL - region, bucket, path, _, creds, err := g.parseUrl(u) + region, bucket, path, _, creds, err := g.parseUrl(req.u) if err != nil { return err } // Remove destination if it already exists - _, err = os.Stat(dst) + _, err = os.Stat(req.Dst) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // Remove the destination - if err := os.RemoveAll(dst); err != nil { + if err := os.RemoveAll(req.Dst); err != nil { return err } } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(req.Dst), 0755); err != nil { return err } - config := g.getAWSConfig(region, u, creds) + config := g.getAWSConfig(region, req.u, creds) sess := session.New(config) client := s3.New(sess) @@ -96,15 +95,15 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { lastMarker := "" hasMore := true for hasMore { - req := &s3.ListObjectsInput{ + s3Req := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(path), } if lastMarker != "" { - req.Marker = aws.String(lastMarker) + s3Req.Marker = aws.String(lastMarker) } - resp, err := client.ListObjects(req) + resp, err := client.ListObjects(s3Req) if err != nil { return err } @@ -126,7 +125,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { if err != nil { return err } - objDst = filepath.Join(dst, objDst) + objDst = filepath.Join(req.Dst, objDst) if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { return err @@ -137,17 +136,16 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { return nil } -func (g *S3Getter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - region, bucket, path, version, creds, err := g.parseUrl(u) +func (g *S3Getter) GetFile(ctx context.Context, req *Request) error { + region, bucket, path, version, creds, err := g.parseUrl(req.u) if err != nil { return err } - config := g.getAWSConfig(region, u, creds) + config := g.getAWSConfig(region, req.u, creds) sess := session.New(config) client := s3.New(sess) - return g.getObject(ctx, client, dst, bucket, path, version) + return g.getObject(ctx, client, req.Dst, bucket, path, version) } func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/v2/go.mod 
similarity index 90% rename from vendor/github.com/hashicorp/go-getter/go.mod rename to vendor/github.com/hashicorp/go-getter/v2/go.mod index a869e8f80..3335e86a2 100644 --- a/vendor/github.com/hashicorp/go-getter/go.mod +++ b/vendor/github.com/hashicorp/go-getter/v2/go.mod @@ -1,4 +1,4 @@ -module github.com/hashicorp/go-getter +module github.com/hashicorp/go-getter/v2 require ( cloud.google.com/go v0.45.1 @@ -7,6 +7,7 @@ require ( github.com/cheggaaa/pb v1.0.27 github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.7.0 // indirect + github.com/google/go-cmp v0.3.0 github.com/hashicorp/go-cleanhttp v0.5.0 github.com/hashicorp/go-safetemp v1.0.0 github.com/hashicorp/go-version v1.1.0 @@ -21,3 +22,5 @@ require ( google.golang.org/api v0.9.0 gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect ) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/v2/go.sum similarity index 100% rename from vendor/github.com/hashicorp/go-getter/go.sum rename to vendor/github.com/hashicorp/go-getter/v2/go.sum diff --git a/vendor/github.com/hashicorp/go-getter/v2/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url.go new file mode 100644 index 000000000..02497c254 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url.go @@ -0,0 +1,14 @@ +package url + +import ( + "net/url" +) + +// Parse parses rawURL into a URL structure. +// The rawURL may be relative or absolute. +// +// Parse is a wrapper for the Go stdlib net/url Parse function, but returns +// Windows "safe" URLs on Windows platforms. +func Parse(rawURL string) (*url.URL, error) { + return parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_unix.go new file mode 100644 index 000000000..ed1352a91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package url + +import ( + "net/url" +) + +func parse(rawURL string) (*url.URL, error) { + return url.Parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_windows.go new file mode 100644 index 000000000..4280ec59a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/helper/url/url_windows.go @@ -0,0 +1,39 @@ +package url + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" +) + +func parse(rawURL string) (*url.URL, error) { + // Make sure we're using "/" since URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter. In which case we + // force the 'file' scheme to avoid "net/url" URL.String() prepending + // our url with "./". + rawURL = "file://" + rawURL + } + + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + + // Remove leading slash for absolute file paths. 
+ if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { + u.Path = u.Path[1:] + } + + return u, err +} diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/v2/mode.go similarity index 53% rename from vendor/github.com/hashicorp/go-getter/client_mode.go rename to vendor/github.com/hashicorp/go-getter/v2/mode.go index 7f02509a7..c9fd2d9c7 100644 --- a/vendor/github.com/hashicorp/go-getter/client_mode.go +++ b/vendor/github.com/hashicorp/go-getter/v2/mode.go @@ -1,24 +1,24 @@ package getter -// ClientMode is the mode that the client operates in. -type ClientMode uint +// Mode is the mode that the client operates in. +type Mode uint const ( - ClientModeInvalid ClientMode = iota + ModeInvalid Mode = iota - // ClientModeAny downloads anything it can. In this mode, dst must + // ModeAny downloads anything it can. In this mode, dst must // be a directory. If src is a file, it is saved into the directory // with the basename of the URL. If src is a directory or archive, // it is unpacked directly into dst. - ClientModeAny + ModeAny - // ClientModeFile downloads a single file. In this mode, dst must + // ModeFile downloads a single file. In this mode, dst must // be a file path (doesn't have to exist). src must point to a single // file. It is saved as dst. - ClientModeFile + ModeFile - // ClientModeDir downloads a directory. In this mode, dst must be + // ModeDir downloads a directory. In this mode, dst must be // a directory path (doesn't have to exist). src must point to an // archive or directory (such as in s3). - ClientModeDir + ModeDir ) diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/v2/netrc.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/netrc.go rename to vendor/github.com/hashicorp/go-getter/v2/netrc.go diff --git a/vendor/github.com/hashicorp/go-getter/v2/request.go b/vendor/github.com/hashicorp/go-getter/v2/request.go new file mode 100644 index 000000000..f8f24d592 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/v2/request.go @@ -0,0 +1,39 @@ +package getter + +import "net/url" + +type Request struct { + // Src is the source URL to get. + // + // Dst is the path to save the downloaded thing as. If Dir is set to + // true, then this should be a directory. If the directory doesn't exist, + // it will be created for you. + // + // Pwd is the working directory for detection. If this isn't set, some + // detection may fail. Client will not default pwd to the current + // working directory for security reasons. + Src string + Dst string + Pwd string + + // Mode is the method of download the client will use. See Mode + // for documentation. + Mode Mode + + // Copy, in local file mode if set to true, will copy data instead of using + // a symlink. If false, attempts to symlink to speed up the operation and + // to lower the disk space usage. If the symlink fails, may attempt to copy + // on windows. + Copy bool + + // Inplace, in local file mode if set to true, do nothing and the returned + // operation will simply contain the source file path. Inplace has precedence + // over Copy. + Inplace bool + + // ProgressListener allows to track file downloads. + // By default a no op progress listener is used. 
+ ProgressListener ProgressTracker + + u *url.URL +} diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/v2/source.go similarity index 100% rename from vendor/github.com/hashicorp/go-getter/source.go rename to vendor/github.com/hashicorp/go-getter/v2/source.go diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/v2/storage.go similarity index 85% rename from vendor/github.com/hashicorp/go-getter/storage.go rename to vendor/github.com/hashicorp/go-getter/v2/storage.go index 2bc6b9ec3..3ccc1c3af 100644 --- a/vendor/github.com/hashicorp/go-getter/storage.go +++ b/vendor/github.com/hashicorp/go-getter/v2/storage.go @@ -1,5 +1,7 @@ package getter +import "context" + // Storage is an interface that knows how to lookup downloaded directories // as well as download and update directories from their sources into the // proper location. @@ -9,5 +11,5 @@ type Storage interface { Dir(string) (string, bool, error) // Get will download and optionally update the given directory. - Get(string, string, bool) error + Get(context.Context, string, string, bool) error } diff --git a/vendor/modules.txt b/vendor/modules.txt index 8439a21a2..e07421010 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -318,8 +318,10 @@ github.com/hashicorp/go-cty-funcs/filesystem # github.com/hashicorp/go-cty-funcs/uuid v0.0.0-20200203151509-c92509f48b18 github.com/hashicorp/go-cty-funcs/uuid # github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da -github.com/hashicorp/go-getter github.com/hashicorp/go-getter/helper/url +# github.com/hashicorp/go-getter/v2 v2.0.0-20200206160058-e2a28063d6e7 +github.com/hashicorp/go-getter/v2 +github.com/hashicorp/go-getter/v2/helper/url # github.com/hashicorp/go-immutable-radix v1.0.0 github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-multierror v1.0.0 From 8e6bad209bef18c41d6ed7abe009311a125f0684 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 17:50:29 +0100 Subject: [PATCH 07/61] try to use isos inplace whatever the os --- common/step_download.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/common/step_download.go b/common/step_download.go index c15fc566d..465573af9 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -198,9 +198,7 @@ func (s *StepDownload) download(ctx context.Context, ui packer.Ui, source string ProgressListener: ui, Pwd: wd, Mode: getter.ModeFile, - } - if runtime.GOOS == "windows" { - req.Inplace = true + Inplace: true, } switch op, err := defaultGetterClient.Get(ctx, req); err.(type) { From a8e717ae097f4c896ab3dc518dc14a50a134cb95 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 17:51:15 +0100 Subject: [PATCH 08/61] ISOConfig.Prepare: fix ChecksumFromFile usage --- common/iso_config.go | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/common/iso_config.go b/common/iso_config.go index 08f355b34..0836035c8 100644 --- a/common/iso_config.go +++ b/common/iso_config.go @@ -3,12 +3,11 @@ package common import ( + "context" "encoding/hex" "errors" "fmt" - "log" "net/url" - "os" "strings" getter "github.com/hashicorp/go-getter/v2" @@ -108,7 +107,7 @@ type ISOConfig struct { TargetExtension string `mapstructure:"iso_target_extension"` } -func (c *ISOConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs []error) { +func (c *ISOConfig) Prepare(*interpolate.Context) (warnings []string, errs []error) { if len(c.ISOUrls) != 
0 && c.RawSingleISOUrl != "" { errs = append( errs, errors.New("Only one of iso_url or iso_urls must be specified")) @@ -166,20 +165,7 @@ func (c *ISOConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs [ errs = append(errs, fmt.Errorf("error parsing URL <%s>: %s", c.ISOUrls[0], err)) } - wd, err := os.Getwd() - if err != nil { - log.Printf("get working directory: %v", err) - // here we ignore the error in case the - // working directory is not needed. - } - gc := getter.Client{ - Dst: "no-op", - Src: u.String(), - Pwd: wd, - Dir: false, - Getters: getter.Getters, - } - cksum, err := gc.ChecksumFromFile(c.ISOChecksumURL, u) + cksum, err := getter.DefaultClient.ChecksumFromFile(context.TODO(), c.ISOChecksumURL, u.Path) if cksum == nil || err != nil { errs = append(errs, fmt.Errorf("Couldn't extract checksum from checksum file")) } else { From d45eca5cdcb778d353c54f120d82e8c64e417f00 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 18:17:57 +0100 Subject: [PATCH 09/61] update tests --- common/step_download_test.go | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/common/step_download_test.go b/common/step_download_test.go index ce7961c1f..e50483781 100644 --- a/common/step_download_test.go +++ b/common/step_download_test.go @@ -5,7 +5,6 @@ import ( "context" "crypto/sha1" "encoding/hex" - "github.com/hashicorp/packer/packer" "io/ioutil" "log" "net/http" @@ -18,6 +17,7 @@ import ( "github.com/google/go-cmp/cmp" urlhelper "github.com/hashicorp/go-getter/v2/helper/url" "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/packer/tmp" ) @@ -74,7 +74,7 @@ func TestStepDownload_Run(t *testing.T) { fields{Url: []string{abs(t, "./test-fixtures/root/another.txt")}}, multistep.ActionContinue, []string{ - toSha1(abs(t, "./test-fixtures/root/another.txt")), + // toSha1(abs(t, "./test-fixtures/root/another.txt")), toSha1(abs(t, "./test-fixtures/root/another.txt")) + ".lock", }, }, @@ -82,7 +82,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Url: []string{abs(t, "./test-fixtures/root//another.txt")}}, multistep.ActionContinue, []string{ - toSha1(abs(t, "./test-fixtures/root//another.txt")), toSha1(abs(t, "./test-fixtures/root//another.txt")) + ".lock", }, }, @@ -90,7 +89,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Url: []string{abs(t, "./test-fixtures/root/another.txt")}, ChecksumType: "none"}, multistep.ActionContinue, []string{ - toSha1(abs(t, "./test-fixtures/root/another.txt")), toSha1(abs(t, "./test-fixtures/root/another.txt")) + ".lock", }, }, @@ -159,7 +157,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{"./test-fixtures/root/another.txt?checksum=" + cs["/root/another.txt"]}}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -167,7 +164,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{"./test-fixtures/root/another.txt?"}, Checksum: cs["/root/another.txt"]}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -175,7 +171,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{"./test-fixtures/root/another.txt?"}, ChecksumType: "sha1", Checksum: cs["/root/another.txt"]}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", 
toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -183,7 +178,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{abs(t, "./test-fixtures/root/another.txt") + "?checksum=" + cs["/root/another.txt"]}}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -191,7 +185,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{abs(t, "./test-fixtures/root/another.txt") + "?"}, Checksum: cs["/root/another.txt"]}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -199,7 +192,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Extension: "txt", Url: []string{abs(t, "./test-fixtures/root/another.txt") + "?"}, ChecksumType: "sha1", Checksum: cs["/root/another.txt"]}, multistep.ActionContinue, []string{ - toSha1(cs["/root/another.txt"]) + ".txt", toSha1(cs["/root/another.txt"]) + ".txt.lock", }, }, @@ -214,7 +206,6 @@ func TestStepDownload_Run(t *testing.T) { }, multistep.ActionContinue, []string{ - toSha1(cs["/root/basic.txt"]), toSha1(cs["/root/basic.txt"]) + ".lock", }, }, @@ -272,9 +263,8 @@ func TestStepDownload_download(t *testing.T) { if err != nil { t.Fatalf("Bad: non expected error %s", err.Error()) } - if filepath.Ext(path) != "."+step.Extension { - t.Fatalf("bad: path should contain extension %s but it was %s", step.Extension, filepath.Ext(path)) - } + // because of the inplace option; the result file will not be renamed + // sha.ova. os.RemoveAll(step.TargetPath) // Abs path with no extension provided @@ -284,20 +274,18 @@ func TestStepDownload_download(t *testing.T) { if err != nil { t.Fatalf("Bad: non expected error %s", err.Error()) } - if filepath.Ext(path) != ".iso" { - t.Fatalf("bad: path should contain extension %s but it was .iso", step.Extension) - } + // because of the inplace option; the result file will not be renamed + // sha.ova. os.RemoveAll(step.TargetPath) // Path with file step.TargetPath = "./packer/file.iso" - path, err = step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") + _, err = step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") if err != nil { t.Fatalf("Bad: non expected error %s", err.Error()) } - if path != "./packer/file.iso" { - t.Fatalf("bad: path should be ./packer/file.iso but it was %s", path) - } + // because of the inplace option; the result file will not be renamed + // sha.ova. 
os.RemoveAll(step.TargetPath) } From f91429f256b307a06f855188775c3b7c2e37f9e3 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 18:18:03 +0100 Subject: [PATCH 10/61] fix goimports --- common/step_provision_test.go | 2 +- common/terminal_posix.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/common/step_provision_test.go b/common/step_provision_test.go index 809aaed64..afd4019b9 100644 --- a/common/step_provision_test.go +++ b/common/step_provision_test.go @@ -2,10 +2,10 @@ package common import ( "fmt" - "github.com/hashicorp/packer/helper/communicator" "os" "testing" + "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/helper/multistep" ) diff --git a/common/terminal_posix.go b/common/terminal_posix.go index 6c69d79e9..2a98a4fbd 100644 --- a/common/terminal_posix.go +++ b/common/terminal_posix.go @@ -4,8 +4,9 @@ package common // Imports for determining terminal information across platforms import ( - "golang.org/x/sys/unix" "os" + + "golang.org/x/sys/unix" ) // posix api From 973a1ea103c8de23cccb2552c16b77761c9a1bbc Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 18:23:08 +0100 Subject: [PATCH 11/61] remove comented code --- common/step_download_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/common/step_download_test.go b/common/step_download_test.go index e50483781..b7e297b37 100644 --- a/common/step_download_test.go +++ b/common/step_download_test.go @@ -74,7 +74,6 @@ func TestStepDownload_Run(t *testing.T) { fields{Url: []string{abs(t, "./test-fixtures/root/another.txt")}}, multistep.ActionContinue, []string{ - // toSha1(abs(t, "./test-fixtures/root/another.txt")), toSha1(abs(t, "./test-fixtures/root/another.txt")) + ".lock", }, }, From 72c2731f7b5f12b554af13fb7dedcb69f3384d59 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Thu, 6 Feb 2020 18:24:13 +0100 Subject: [PATCH 12/61] Update step_download_test.go --- common/step_download_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/step_download_test.go b/common/step_download_test.go index b7e297b37..fdf50e43a 100644 --- a/common/step_download_test.go +++ b/common/step_download_test.go @@ -258,7 +258,7 @@ func TestStepDownload_download(t *testing.T) { // Abs path with extension provided step.TargetPath = "./packer" step.Extension = "ova" - path, err := step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") + _, err := step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") if err != nil { t.Fatalf("Bad: non expected error %s", err.Error()) } @@ -269,7 +269,7 @@ func TestStepDownload_download(t *testing.T) { // Abs path with no extension provided step.TargetPath = "./packer" step.Extension = "" - path, err = step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") + _, err = step.download(context.TODO(), ui, "./test-fixtures/root/basic.txt") if err != nil { t.Fatalf("Bad: non expected error %s", err.Error()) } From 6d6b94d515d86012a453d449a8c73aff5a73e9bd Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 23 Jan 2020 14:28:54 -0800 Subject: [PATCH 13/61] Add ability to use custom keyvault into azure builds --- builder/azure/arm/builder.go | 23 +++++++-- builder/azure/arm/config.go | 5 +- .../azure/arm/step_certificate_in_keyvault.go | 50 +++++++++++++++++++ .../azure/arm/step_delete_resource_group.go | 9 ++-- builder/azure/arm/step_deploy_template.go | 4 ++ builder/azure/common/constants/stateBag.go | 1 + builder/azure/common/vault.go | 44 +++++++++++++++- 7 files 
changed, 127 insertions(+), 9 deletions(-) create mode 100644 builder/azure/arm/step_certificate_in_keyvault.go diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 799659c50..1bd63f426 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -231,8 +231,16 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack keyVaultDeploymentName := b.stateBag.Get(constants.ArmKeyVaultDeploymentName).(string) steps = []multistep.Step{ NewStepCreateResourceGroup(azureClient, ui), - NewStepValidateTemplate(azureClient, ui, &b.config, GetKeyVaultDeployment), - NewStepDeployTemplate(azureClient, ui, &b.config, keyVaultDeploymentName, GetKeyVaultDeployment), + } + if b.config.BuildKeyVaultName == "" { + steps = append(steps, + NewStepValidateTemplate(azureClient, ui, &b.config, GetKeyVaultDeployment), + NewStepDeployTemplate(azureClient, ui, &b.config, keyVaultDeploymentName, GetKeyVaultDeployment), + ) + } else { + steps = append(steps, NewStepCertificateInKeyVault(azureClient, ui, &b.config)) + } + steps = append(steps, NewStepGetCertificate(azureClient, ui), NewStepSetCertificate(&b.config, ui), NewStepValidateTemplate(azureClient, ui, &b.config, GetVirtualMachineDeployment), @@ -261,7 +269,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack NewStepDeleteResourceGroup(azureClient, ui), NewStepDeleteOSDisk(azureClient, ui), NewStepDeleteAdditionalDisks(azureClient, ui), - } + ) } else { return nil, fmt.Errorf("Builder does not support the os_type '%s'", b.config.OSType) } @@ -395,7 +403,14 @@ func (b *Builder) configureStateBag(stateBag multistep.StateBag) { stateBag.Put(constants.ArmKeyVaultDeploymentName, fmt.Sprintf("kv%s", b.config.tmpDeploymentName)) } - stateBag.Put(constants.ArmKeyVaultName, b.config.tmpKeyVaultName) + if b.config.BuildKeyVaultName != "" { + stateBag.Put(constants.ArmKeyVaultName, b.config.BuildKeyVaultName) + b.config.tmpKeyVaultName = b.config.BuildKeyVaultName + stateBag.Put(constants.ArmIsExistingKeyVault, false) + } else { + stateBag.Put(constants.ArmKeyVaultName, b.config.tmpKeyVaultName) + stateBag.Put(constants.ArmIsExistingKeyVault, true) + } stateBag.Put(constants.ArmNicName, b.config.tmpNicName) stateBag.Put(constants.ArmPublicIPAddressName, b.config.tmpPublicIPAddressName) stateBag.Put(constants.ArmResourceGroupName, b.config.BuildResourceGroupName) diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go index 18b426e19..3b701eae7 100644 --- a/builder/azure/arm/config.go +++ b/builder/azure/arm/config.go @@ -253,7 +253,10 @@ type Config struct { // group is deleted at the end of the build. TempResourceGroupName string `mapstructure:"temp_resource_group_name"` // Specify an existing resource group to run the build in. - BuildResourceGroupName string `mapstructure:"build_resource_group_name"` + BuildResourceGroupName string `mapstructure:"build_resource_group_name"` + // Specify an existing key vault to use for uploading certificates to the + // instance to connect. + BuildKeyVaultName string `mapstructure:"build_key_vault_name"` storageAccountBlobEndpoint string // This value allows you to // set a virtual_network_name and obtain a public IP. 
If this value is not diff --git a/builder/azure/arm/step_certificate_in_keyvault.go b/builder/azure/arm/step_certificate_in_keyvault.go new file mode 100644 index 000000000..a46b160ce --- /dev/null +++ b/builder/azure/arm/step_certificate_in_keyvault.go @@ -0,0 +1,50 @@ +package arm + +import ( + "context" + "fmt" + + "github.com/hashicorp/packer/builder/azure/common/constants" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type StepCertificateInKeyVault struct { + config *Config + client *AzureClient + say func(message string) + error func(e error) +} + +func NewStepCertificateInKeyVault(cli *AzureClient, ui packer.Ui, config *Config) *StepCertificateInKeyVault { + var step = &StepCertificateInKeyVault{ + client: cli, + config: config, + say: func(message string) { ui.Say(message) }, + error: func(e error) { ui.Error(e.Error()) }, + } + + return step +} + +func (s *StepCertificateInKeyVault) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + s.say("Setting the certificate in the KeyVault...") + + var keyVaultName = state.Get(constants.ArmKeyVaultName).(string) + // err := s.client.CreateKey(keyVaultName, DefaultSecretName) + // if err != nil { + // s.error(fmt.Errorf("Error setting winrm cert in custom keyvault: %s", err)) + // return multistep.ActionHalt + // } + + err := s.client.SetSecret(keyVaultName, DefaultSecretName, s.config.winrmCertificate) + if err != nil { + s.error(fmt.Errorf("Error setting winrm cert in custom keyvault: %s", err)) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (*StepCertificateInKeyVault) Cleanup(multistep.StateBag) { +} diff --git a/builder/azure/arm/step_delete_resource_group.go b/builder/azure/arm/step_delete_resource_group.go index aac1c4b91..80564b1c6 100644 --- a/builder/azure/arm/step_delete_resource_group.go +++ b/builder/azure/arm/step_delete_resource_group.go @@ -44,9 +44,12 @@ func (s *StepDeleteResourceGroup) deleteResourceGroup(ctx context.Context, state } if keyVaultDeploymentName, ok := state.GetOk(constants.ArmKeyVaultDeploymentName); ok { - err = s.deleteDeploymentResources(ctx, keyVaultDeploymentName.(string), resourceGroupName) - if err != nil { - return err + // Only delete if custom keyvault was not provided. 
+ if exists := state.Get(constants.ArmIsExistingKeyVault).(bool); exists { + err = s.deleteDeploymentResources(ctx, keyVaultDeploymentName.(string), resourceGroupName) + if err != nil { + return err + } } } diff --git a/builder/azure/arm/step_deploy_template.go b/builder/azure/arm/step_deploy_template.go index a8626bd00..15d19394f 100644 --- a/builder/azure/arm/step_deploy_template.go +++ b/builder/azure/arm/step_deploy_template.go @@ -76,6 +76,10 @@ func (s *StepDeployTemplate) deleteTemplate(ctx context.Context, state multistep } func (s *StepDeployTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + if s.config.BuildKeyVaultName != "" { + // Deployment already exists + + } s.say("Deploying deployment template ...") var resourceGroupName = state.Get(constants.ArmResourceGroupName).(string) diff --git a/builder/azure/common/constants/stateBag.go b/builder/azure/common/constants/stateBag.go index 0bf0ad1ae..b18e5f152 100644 --- a/builder/azure/common/constants/stateBag.go +++ b/builder/azure/common/constants/stateBag.go @@ -36,6 +36,7 @@ const ( ArmTags string = "arm.Tags" ArmVirtualMachineCaptureParameters string = "arm.VirtualMachineCaptureParameters" ArmIsExistingResourceGroup string = "arm.IsExistingResourceGroup" + ArmIsExistingKeyVault string = "arm.IsExistingKeyVault" ArmIsManagedImage string = "arm.IsManagedImage" ArmManagedImageResourceGroupName string = "arm.ManagedImageResourceGroupName" diff --git a/builder/azure/common/vault.go b/builder/azure/common/vault.go index 1bda604a2..3c53c0074 100644 --- a/builder/azure/common/vault.go +++ b/builder/azure/common/vault.go @@ -54,7 +54,8 @@ func (client *VaultClient) GetSecret(vaultName, secretName string) (*Secret, err autorest.AsGet(), autorest.WithBaseURL(client.getVaultUrl(vaultName)), autorest.WithPathParameters("/secrets/{secret-name}", p), - autorest.WithQueryParameters(q)) + autorest.WithQueryParameters(q), + ) if err != nil { return nil, err @@ -86,6 +87,47 @@ func (client *VaultClient) GetSecret(vaultName, secretName string) (*Secret, err return &secret, nil } +func (client *VaultClient) SetSecret(vaultName, secretName string, secretValue string) error { + p := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + q := map[string]interface{}{ + "api-version": AzureVaultApiVersion, + } + + jsonBody := fmt.Sprintf(`{"value": "%s"}`, secretValue) + + req, err := autorest.Prepare( + &http.Request{}, + autorest.AsPut(), + autorest.AsContentType("application/json; charset=utf-8"), + autorest.WithBaseURL(client.getVaultUrl(vaultName)), + autorest.WithPathParameters("/secrets/{secret-name}", p), + autorest.WithQueryParameters(q), + autorest.WithString(jsonBody), + ) + + if err != nil { + return err + } + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return err + } + + if resp.StatusCode != 200 { + return fmt.Errorf( + "Failed to set secret to %s/%s, HTTP status code=%d (%s)", + vaultName, + secretName, + resp.StatusCode, + http.StatusText(resp.StatusCode)) + } + + return nil +} + // Delete deletes the specified Azure key vault. // // resourceGroupName is the name of the Resource Group to which the vault belongs. 
vaultName is the name of the vault From 9643ad35f1afd1aa365b48e54bfb45dadf7b8ad7 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 6 Feb 2020 16:35:39 -0800 Subject: [PATCH 14/61] add tests --- builder/azure/arm/builder.go | 2 +- .../azure/arm/step_certificate_in_keyvault.go | 11 +--- .../arm/step_certificate_in_keyvault_test.go | 66 +++++++++++++++++++ builder/azure/arm/step_deploy_template.go | 4 -- builder/azure/common/vault.go | 9 +++ builder/azure/common/vault_client_mock.go | 56 ++++++++++++++++ 6 files changed, 135 insertions(+), 13 deletions(-) create mode 100644 builder/azure/arm/step_certificate_in_keyvault_test.go create mode 100644 builder/azure/common/vault_client_mock.go diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 1bd63f426..55e889fe6 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -238,7 +238,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack NewStepDeployTemplate(azureClient, ui, &b.config, keyVaultDeploymentName, GetKeyVaultDeployment), ) } else { - steps = append(steps, NewStepCertificateInKeyVault(azureClient, ui, &b.config)) + steps = append(steps, NewStepCertificateInKeyVault(&azureClient.VaultClient, ui, &b.config)) } steps = append(steps, NewStepGetCertificate(azureClient, ui), diff --git a/builder/azure/arm/step_certificate_in_keyvault.go b/builder/azure/arm/step_certificate_in_keyvault.go index a46b160ce..d646acead 100644 --- a/builder/azure/arm/step_certificate_in_keyvault.go +++ b/builder/azure/arm/step_certificate_in_keyvault.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/hashicorp/packer/builder/azure/common" "github.com/hashicorp/packer/builder/azure/common/constants" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" @@ -11,12 +12,12 @@ import ( type StepCertificateInKeyVault struct { config *Config - client *AzureClient + client common.AZVaultClientIface say func(message string) error func(e error) } -func NewStepCertificateInKeyVault(cli *AzureClient, ui packer.Ui, config *Config) *StepCertificateInKeyVault { +func NewStepCertificateInKeyVault(cli common.AZVaultClientIface, ui packer.Ui, config *Config) *StepCertificateInKeyVault { var step = &StepCertificateInKeyVault{ client: cli, config: config, @@ -29,13 +30,7 @@ func NewStepCertificateInKeyVault(cli *AzureClient, ui packer.Ui, config *Config func (s *StepCertificateInKeyVault) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { s.say("Setting the certificate in the KeyVault...") - var keyVaultName = state.Get(constants.ArmKeyVaultName).(string) - // err := s.client.CreateKey(keyVaultName, DefaultSecretName) - // if err != nil { - // s.error(fmt.Errorf("Error setting winrm cert in custom keyvault: %s", err)) - // return multistep.ActionHalt - // } err := s.client.SetSecret(keyVaultName, DefaultSecretName, s.config.winrmCertificate) if err != nil { diff --git a/builder/azure/arm/step_certificate_in_keyvault_test.go b/builder/azure/arm/step_certificate_in_keyvault_test.go new file mode 100644 index 000000000..a246a7746 --- /dev/null +++ b/builder/azure/arm/step_certificate_in_keyvault_test.go @@ -0,0 +1,66 @@ +package arm + +import ( + "bytes" + "context" + "testing" + + azcommon "github.com/hashicorp/packer/builder/azure/common" + "github.com/hashicorp/packer/builder/azure/common/constants" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +func TestNewStepCertificateInKeyVault(t *testing.T) { 
+ cli := azcommon.MockAZVaultClient{} + ui := &packer.BasicUi{ + Reader: new(bytes.Buffer), + Writer: new(bytes.Buffer), + } + state := new(multistep.BasicStateBag) + state.Put(constants.ArmKeyVaultName, "testKeyVaultName") + + config := &Config{ + winrmCertificate: "testCertificateString", + } + + certKVStep := NewStepCertificateInKeyVault(&cli, ui, config) + stepAction := certKVStep.Run(context.TODO(), state) + + if stepAction == multistep.ActionHalt { + t.Fatalf("step should have succeeded.") + } + if !cli.SetSecretCalled { + t.Fatalf("Step should have called SetSecret on Azure client.") + } + if cli.SetSecretCert != "testCertificateString" { + t.Fatalf("Step should have read cert from winRMCertificate field on config.") + } + if cli.SetSecretVaultName != "testKeyVaultName" { + t.Fatalf("step should have read keyvault name from state.") + } +} + +func TestNewStepCertificateInKeyVault_error(t *testing.T) { + // Tell mock to return an error + cli := azcommon.MockAZVaultClient{} + cli.IsError = true + + ui := &packer.BasicUi{ + Reader: new(bytes.Buffer), + Writer: new(bytes.Buffer), + } + state := new(multistep.BasicStateBag) + state.Put(constants.ArmKeyVaultName, "testKeyVaultName") + + config := &Config{ + winrmCertificate: "testCertificateString", + } + + certKVStep := NewStepCertificateInKeyVault(&cli, ui, config) + stepAction := certKVStep.Run(context.TODO(), state) + + if stepAction != multistep.ActionHalt { + t.Fatalf("step should have failed.") + } +} diff --git a/builder/azure/arm/step_deploy_template.go b/builder/azure/arm/step_deploy_template.go index 15d19394f..a8626bd00 100644 --- a/builder/azure/arm/step_deploy_template.go +++ b/builder/azure/arm/step_deploy_template.go @@ -76,10 +76,6 @@ func (s *StepDeployTemplate) deleteTemplate(ctx context.Context, state multistep } func (s *StepDeployTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { - if s.config.BuildKeyVaultName != "" { - // Deployment already exists - - } s.say("Deploying deployment template ...") var resourceGroupName = state.Get(constants.ArmResourceGroupName).(string) diff --git a/builder/azure/common/vault.go b/builder/azure/common/vault.go index 3c53c0074..2be3e8a36 100644 --- a/builder/azure/common/vault.go +++ b/builder/azure/common/vault.go @@ -16,6 +16,15 @@ const ( AzureVaultApiVersion = "2016-10-01" ) +// Enables us to test steps that access this cli +type AZVaultClientIface interface { + GetSecret(string, string) (*Secret, error) + SetSecret(string, string, string) error + DeletePreparer(string, string) (*http.Request, error) + DeleteResponder(*http.Response) (autorest.Response, error) + DeleteSender(*http.Request) (*http.Response, error) +} + type VaultClient struct { autorest.Client keyVaultEndpoint url.URL diff --git a/builder/azure/common/vault_client_mock.go b/builder/azure/common/vault_client_mock.go new file mode 100644 index 000000000..57bbd1c11 --- /dev/null +++ b/builder/azure/common/vault_client_mock.go @@ -0,0 +1,56 @@ +package common + +import ( + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" +) + +type MockAZVaultClient struct { + GetSecretCalled bool + SetSecretCalled bool + SetSecretVaultName string + SetSecretSecretName string + SetSecretCert string + DeleteResponderCalled bool + DeletePreparerCalled bool + DeleteSenderCalled bool + + IsError bool +} + +func (m *MockAZVaultClient) GetSecret(vaultName, secretName string) (*Secret, error) { + m.GetSecretCalled = true + var secret Secret + return &secret, nil +} + +func (m *MockAZVaultClient) 
SetSecret(vaultName, secretName string, secretValue string) error { + m.SetSecretCalled = true + m.SetSecretVaultName = vaultName + m.SetSecretSecretName = secretName + m.SetSecretCert = secretValue + + if m.IsError { + return fmt.Errorf("generic error!!") + } + + return nil +} + +func (m *MockAZVaultClient) DeletePreparer(resourceGroupName string, vaultName string) (*http.Request, error) { + m.DeletePreparerCalled = true + return nil, nil +} + +func (m *MockAZVaultClient) DeleteResponder(resp *http.Response) (autorest.Response, error) { + m.DeleteResponderCalled = true + var result autorest.Response + return result, nil +} + +func (m *MockAZVaultClient) DeleteSender(req *http.Request) (*http.Response, error) { + m.DeleteSenderCalled = true + return nil, nil +} From 7dd1fa44db2cc3b5bfeb2f740456273910531975 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 6 Feb 2020 16:39:41 -0800 Subject: [PATCH 15/61] regenerate code --- builder/azure/arm/config.hcl2spec.go | 2 ++ .../partials/builder/azure/arm/_Config-not-required.html.md | 3 +++ 2 files changed, 5 insertions(+) diff --git a/builder/azure/arm/config.hcl2spec.go b/builder/azure/arm/config.hcl2spec.go index 826100044..c3fbc8036 100644 --- a/builder/azure/arm/config.hcl2spec.go +++ b/builder/azure/arm/config.hcl2spec.go @@ -53,6 +53,7 @@ type FlatConfig struct { TempComputeName *string `mapstructure:"temp_compute_name" required:"false" cty:"temp_compute_name"` TempResourceGroupName *string `mapstructure:"temp_resource_group_name" cty:"temp_resource_group_name"` BuildResourceGroupName *string `mapstructure:"build_resource_group_name" cty:"build_resource_group_name"` + BuildKeyVaultName *string `mapstructure:"build_key_vault_name" cty:"build_key_vault_name"` PrivateVirtualNetworkWithPublicIp *bool `mapstructure:"private_virtual_network_with_public_ip" required:"false" cty:"private_virtual_network_with_public_ip"` VirtualNetworkName *string `mapstructure:"virtual_network_name" required:"false" cty:"virtual_network_name"` VirtualNetworkSubnetName *string `mapstructure:"virtual_network_subnet_name" required:"false" cty:"virtual_network_subnet_name"` @@ -166,6 +167,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "temp_compute_name": &hcldec.AttrSpec{Name: "temp_compute_name", Type: cty.String, Required: false}, "temp_resource_group_name": &hcldec.AttrSpec{Name: "temp_resource_group_name", Type: cty.String, Required: false}, "build_resource_group_name": &hcldec.AttrSpec{Name: "build_resource_group_name", Type: cty.String, Required: false}, + "build_key_vault_name": &hcldec.AttrSpec{Name: "build_key_vault_name", Type: cty.String, Required: false}, "private_virtual_network_with_public_ip": &hcldec.AttrSpec{Name: "private_virtual_network_with_public_ip", Type: cty.Bool, Required: false}, "virtual_network_name": &hcldec.AttrSpec{Name: "virtual_network_name", Type: cty.String, Required: false}, "virtual_network_subnet_name": &hcldec.AttrSpec{Name: "virtual_network_subnet_name", Type: cty.String, Required: false}, diff --git a/website/source/partials/builder/azure/arm/_Config-not-required.html.md b/website/source/partials/builder/azure/arm/_Config-not-required.html.md index 01ab45847..7ed6fe482 100644 --- a/website/source/partials/builder/azure/arm/_Config-not-required.html.md +++ b/website/source/partials/builder/azure/arm/_Config-not-required.html.md @@ -132,6 +132,9 @@ - `build_resource_group_name` (string) - Specify an existing resource group to run the build in. 
+- `build_key_vault_name` (string) - Specify an existing key vault to use for uploading certificates to the + instance to connect. + - `private_virtual_network_with_public_ip` (bool) - This value allows you to set a virtual_network_name and obtain a public IP. If this value is not set and virtual_network_name is defined Packer is only allowed to be From 2181f10e79b1fdcf7abfd3da462b30930007c57e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 6 Feb 2020 16:54:07 -0800 Subject: [PATCH 16/61] fix statebag setup; simplify conditional --- builder/azure/arm/builder.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 55e889fe6..c891d3fe1 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -403,14 +403,14 @@ func (b *Builder) configureStateBag(stateBag multistep.StateBag) { stateBag.Put(constants.ArmKeyVaultDeploymentName, fmt.Sprintf("kv%s", b.config.tmpDeploymentName)) } + stateBag.Put(constants.ArmKeyVaultName, b.config.tmpKeyVaultName) + stateBag.Put(constants.ArmIsExistingKeyVault, false) if b.config.BuildKeyVaultName != "" { stateBag.Put(constants.ArmKeyVaultName, b.config.BuildKeyVaultName) b.config.tmpKeyVaultName = b.config.BuildKeyVaultName - stateBag.Put(constants.ArmIsExistingKeyVault, false) - } else { - stateBag.Put(constants.ArmKeyVaultName, b.config.tmpKeyVaultName) stateBag.Put(constants.ArmIsExistingKeyVault, true) } + stateBag.Put(constants.ArmNicName, b.config.tmpNicName) stateBag.Put(constants.ArmPublicIPAddressName, b.config.tmpPublicIPAddressName) stateBag.Put(constants.ArmResourceGroupName, b.config.BuildResourceGroupName) From 27d27463a6a5a428b16dea424d9b26c330e9a013 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Fri, 7 Feb 2020 10:15:01 +0100 Subject: [PATCH 17/61] Update .travis.yml use go 1.13 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 0696a6ad4..83218bf8b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,7 @@ sudo: false language: go go: - - 1.12.x + - 1.13.x script: - df -h From 7d5f0c11af2baecb8d6cb6ee11206a8f69782637 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Fri, 7 Feb 2020 16:36:14 +0100 Subject: [PATCH 18/61] add max_retries option to aws builders --- builder/amazon/chroot/builder.hcl2spec.go | 2 ++ builder/amazon/common/access_config.go | 7 +++++++ builder/amazon/ebs/builder.hcl2spec.go | 2 ++ builder/amazon/ebssurrogate/builder.hcl2spec.go | 2 ++ builder/amazon/ebsvolume/builder.hcl2spec.go | 2 ++ builder/amazon/instance/builder.hcl2spec.go | 2 ++ post-processor/amazon-import/post-processor.hcl2spec.go | 2 ++ .../amazon/common/_AccessConfig-not-required.html.md | 4 ++++ 8 files changed, 23 insertions(+) diff --git a/builder/amazon/chroot/builder.hcl2spec.go b/builder/amazon/chroot/builder.hcl2spec.go index 130ab5dd8..5565acc2c 100644 --- a/builder/amazon/chroot/builder.hcl2spec.go +++ b/builder/amazon/chroot/builder.hcl2spec.go @@ -41,6 +41,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" 
cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -112,6 +113,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index a32decf07..c63dabd60 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -60,6 +60,10 @@ type AccessConfig struct { // This allows skipping TLS // verification of the AWS EC2 endpoint. The default is false. InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"` + // This is the maximum number of times an API call is retried, in the case + // where requests are being throttled or experiencing transient failures. + // The delay between the subsequent API calls increases exponentially. + MaxRetries int `mapstructure:"max_retries" required:"false"` // The MFA // [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm) // code. This should probably be a user variable since it changes all the @@ -134,6 +138,9 @@ func (c *AccessConfig) Session() (*session.Session, error) { } config := aws.NewConfig().WithCredentialsChainVerboseErrors(true) + if c.MaxRetries > 0 { + config = config.WithMaxRetries(c.MaxRetries) + } staticCreds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) if _, err := staticCreds.Get(); err != credentials.ErrStaticCredentialsEmpty { diff --git a/builder/amazon/ebs/builder.hcl2spec.go b/builder/amazon/ebs/builder.hcl2spec.go index 27501d772..c447e5717 100644 --- a/builder/amazon/ebs/builder.hcl2spec.go +++ b/builder/amazon/ebs/builder.hcl2spec.go @@ -21,6 +21,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -147,6 +148,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": 
&hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, diff --git a/builder/amazon/ebssurrogate/builder.hcl2spec.go b/builder/amazon/ebssurrogate/builder.hcl2spec.go index 81d98f448..45b5f6548 100644 --- a/builder/amazon/ebssurrogate/builder.hcl2spec.go +++ b/builder/amazon/ebssurrogate/builder.hcl2spec.go @@ -64,6 +64,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -191,6 +192,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, diff --git a/builder/amazon/ebsvolume/builder.hcl2spec.go b/builder/amazon/ebsvolume/builder.hcl2spec.go index 34a5199cb..5f2467101 100644 --- a/builder/amazon/ebsvolume/builder.hcl2spec.go +++ b/builder/amazon/ebsvolume/builder.hcl2spec.go @@ -64,6 +64,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -171,6 +172,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": 
&hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, diff --git a/builder/amazon/instance/builder.hcl2spec.go b/builder/amazon/instance/builder.hcl2spec.go index 2726a0994..388b28432 100644 --- a/builder/amazon/instance/builder.hcl2spec.go +++ b/builder/amazon/instance/builder.hcl2spec.go @@ -21,6 +21,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -154,6 +155,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, diff --git a/post-processor/amazon-import/post-processor.hcl2spec.go b/post-processor/amazon-import/post-processor.hcl2spec.go index 60ecb8f08..25738e586 100644 --- a/post-processor/amazon-import/post-processor.hcl2spec.go +++ b/post-processor/amazon-import/post-processor.hcl2spec.go @@ -21,6 +21,7 @@ type FlatConfig struct { CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2"` DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages"` InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries"` MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code"` ProfileName *string `mapstructure:"profile" required:"false" cty:"profile"` RawRegion *string `mapstructure:"region" required:"true" cty:"region"` @@ -69,6 +70,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: 
false}, diff --git a/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md b/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md index 4b7b05825..275f27fb5 100644 --- a/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md +++ b/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md @@ -12,6 +12,10 @@ - `insecure_skip_tls_verify` (bool) - This allows skipping TLS verification of the AWS EC2 endpoint. The default is false. +- `max_retries` (int) - This is the maximum number of times an API call is retried, in the case + where requests are being throttled or experiencing transient failures. + The delay between the subsequent API calls increases exponentially. + - `mfa_code` (string) - The MFA [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm) code. This should probably be a user variable since it changes all the From a684fae28f596b72da95557aef07572158910e59 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Fri, 7 Feb 2020 14:13:19 -0500 Subject: [PATCH 19/61] mapstructure-to-hcl2: Update code formatter to use golang/x/tools/imports --- cmd/mapstructure-to-hcl2/mapstructure-to-hcl2.go | 8 ++++---- packer/builder_mock.hcl2spec.go | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/mapstructure-to-hcl2/mapstructure-to-hcl2.go b/cmd/mapstructure-to-hcl2/mapstructure-to-hcl2.go index 54359cbe5..3721129e8 100644 --- a/cmd/mapstructure-to-hcl2/mapstructure-to-hcl2.go +++ b/cmd/mapstructure-to-hcl2/mapstructure-to-hcl2.go @@ -25,7 +25,6 @@ import ( "bytes" "flag" "fmt" - "go/format" "go/types" "io" "log" @@ -39,6 +38,7 @@ import ( "github.com/zclconf/go-cty/cty" "golang.org/x/tools/go/packages" + "golang.org/x/tools/imports" ) var ( @@ -189,7 +189,7 @@ func main() { log.Fatalf("os.Create: %v", err) } - _, err = outputFile.Write(goFmt(out.Bytes())) + _, err = outputFile.Write(goFmt(outputFile.Name(), out.Bytes())) if err != nil { log.Fatalf("failed to write file: %v", err) } @@ -575,8 +575,8 @@ func ToSnakeCase(str string) string { return strings.ToLower(snake) } -func goFmt(b []byte) []byte { - fb, err := format.Source(b) +func goFmt(filename string, b []byte) []byte { + fb, err := imports.Process(filename, b, nil) if err != nil { log.Printf("formatting err: %v", err) return b diff --git a/packer/builder_mock.hcl2spec.go b/packer/builder_mock.hcl2spec.go index a406ef9f0..a5ea27c82 100644 --- a/packer/builder_mock.hcl2spec.go +++ b/packer/builder_mock.hcl2spec.go @@ -2,9 +2,10 @@ package packer import ( + "io" + "github.com/hashicorp/hcl/v2/hcldec" "github.com/zclconf/go-cty/cty" - "io" ) // FlatMockBuilder is an auto-generated flat version of MockBuilder. 
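Note on the formatter change in the patch above (a minimal, hedged sketch, not part of the patch series): go/format.Source only pretty-prints and sorts the import specs it is given, while golang.org/x/tools/imports.Process additionally regroups standard-library and third-party imports, which is why the regenerated builder_mock.hcl2spec.go now gets "io" in its own group. The package name, file name, and source snippet below are invented for illustration; only format.Source and imports.Process are taken from the diff.

package main

import (
	"fmt"
	"go/format"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// A tiny generated-looking file with one stdlib and one third-party import
	// in a single group, mirroring the builder_mock.hcl2spec.go case above.
	src := []byte(`package demo

import (
	"github.com/zclconf/go-cty/cty"
	"io"
)

var _ io.Reader
var _ = cty.String
`)

	// format.Source formats and sorts specs within the existing import group,
	// but leaves stdlib and third-party imports in the same group.
	gofmted, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}

	// imports.Process also splits stdlib imports from third-party ones; a nil
	// *imports.Options means default goimports behaviour.
	goimported, err := imports.Process("demo.go", src, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("format.Source:\n%s\ngoimports:\n%s", gofmted, goimported)
}

Running this sketch should show the second output moving "io" into its own group above the third-party imports, matching the regenerated file in the hunk above.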
From 8825bf2cd756e527d387e23e534390e91b918fb2 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Fri, 7 Feb 2020 14:23:36 -0500 Subject: [PATCH 20/61] go.mod: Add golang.org/x/tools/imports ``` go get golang.org/x/tools/imports go mod tidy go mod vendor ``` --- go.mod | 2 +- go.sum | 9 +- .../x/tools/cmd/goimports/goimports.go | 8 +- vendor/golang.org/x/tools/go/analysis/doc.go | 77 +- .../x/tools/go/ast/astutil/imports.go | 5 +- .../x/tools/go/ast/inspector/inspector.go | 4 +- .../go/internal/gcimporter/gcimporter.go | 8 +- vendor/golang.org/x/tools/go/packages/doc.go | 3 +- .../x/tools/go/packages/external.go | 7 +- .../golang.org/x/tools/go/packages/golist.go | 716 ++++++------------ .../x/tools/go/packages/golist_overlay.go | 201 +++-- .../x/tools/go/packages/packages.go | 38 +- vendor/golang.org/x/tools/imports/forward.go | 67 ++ .../x/tools/internal/fastwalk/fastwalk.go | 10 +- .../internal/fastwalk/fastwalk_portable.go | 2 +- .../tools/internal/fastwalk/fastwalk_unix.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 11 +- .../x/tools/internal/imports/fix.go | 594 ++++++++------- .../x/tools/internal/imports/imports.go | 37 +- .../x/tools/internal/imports/mod.go | 271 ++++--- .../x/tools/internal/imports/mod_cache.go | 95 ++- .../internal/packagesinternal/packages.go | 4 + .../golang.org/x/tools/internal/span/parse.go | 100 --- .../golang.org/x/tools/internal/span/span.go | 285 ------- .../golang.org/x/tools/internal/span/token.go | 179 ----- .../x/tools/internal/span/token111.go | 39 - .../x/tools/internal/span/token112.go | 16 - .../golang.org/x/tools/internal/span/uri.go | 152 ---- .../golang.org/x/tools/internal/span/utf16.go | 94 --- vendor/modules.txt | 5 +- 30 files changed, 1124 insertions(+), 1917 deletions(-) create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go delete mode 100644 vendor/golang.org/x/tools/internal/span/parse.go delete mode 100644 vendor/golang.org/x/tools/internal/span/span.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token111.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token112.go delete mode 100644 vendor/golang.org/x/tools/internal/span/uri.go delete mode 100644 vendor/golang.org/x/tools/internal/span/utf16.go diff --git a/go.mod b/go.mod index 458eac26c..0bef6cb31 100644 --- a/go.mod +++ b/go.mod @@ -163,7 +163,7 @@ require ( golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect - golang.org/x/tools v0.0.0-20191203051722-db047d72ee39 + golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa google.golang.org/api v0.14.0 google.golang.org/appengine v1.6.5 // indirect google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 // indirect diff --git a/go.sum b/go.sum index f68d86128..b2c1f52fd 100644 --- a/go.sum +++ b/go.sum @@ -500,6 +500,7 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad h1:Jh8cai0fqIK+f6nG0UgPW5wFk8wmiMhM3AyciDBdtQg= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -527,6 +528,8 @@ golang.org/x/mobile v0.0.0-20191130191448-5c0e7e404af8 h1:9w7mvrikkrG9zFfEJfuFe0 golang.org/x/mobile v0.0.0-20191130191448-5c0e7e404af8/go.mod h1:p895TfNkDgPEmEQrNiOtIl3j98d/tGU95djDj7NfyjQ= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -620,9 +623,11 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203051722-db047d72ee39 h1:zARK4PTmTfx1BC6iKP21qIRjz0nFzFj4ZAlbUy6Q6pM= -golang.org/x/tools v0.0.0-20191203051722-db047d72ee39/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa h1:5E4dL8+NgFOgjwbTKz+OOEGGhP+ectTmF842l6KjupQ= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index a476a7f3c..2cca29235 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -45,8 +45,12 @@ var ( Fragment: true, // This environment, and its caches, will be reused for the whole run. 
Env: &imports.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), }, } exitCode = 0 diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index a2353fc88..ea56b724e 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -1,8 +1,9 @@ /* -The analysis package defines the interface between a modular static +Package analysis defines the interface between a modular static analysis and an analysis driver program. + Background A static analysis is a function that inspects a package of Go code and @@ -41,9 +42,9 @@ the go/analysis/passes/ subdirectory: package unusedresult var Analyzer = &analysis.Analyzer{ - Name: "unusedresult", - Doc: "check for unused results of calls to some functions", - Run: run, + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, ... } @@ -51,7 +52,6 @@ the go/analysis/passes/ subdirectory: ... } - An analysis driver is a program such as vet that runs a set of analyses and prints the diagnostics that they report. The driver program must import the list of Analyzers it needs. @@ -70,51 +70,18 @@ A driver may use the name, flags, and documentation to provide on-line help that describes the analyses it performs. The doc comment contains a brief one-line summary, optionally followed by paragraphs of explanation. -The vet command, shown below, is an example of a driver that runs -multiple analyzers. It is based on the multichecker package -(see the "Standalone commands" section for details). - - $ go build golang.org/x/tools/go/analysis/cmd/vet - $ ./vet help - vet is a tool for static analysis of Go programs. - - Usage: vet [-flag] [package] - - Registered analyzers: - - asmdecl report mismatches between assembly files and Go declarations - assign check for useless assignments - atomic check for common mistakes using the sync/atomic package - ... - unusedresult check for unused results of calls to some functions - - $ ./vet help unusedresult - unusedresult: check for unused results of calls to some functions - - Analyzer flags: - - -unusedresult.funcs value - comma-separated list of functions whose results must be used (default Error,String) - -unusedresult.stringmethods value - comma-separated list of names of methods of type func() string whose results must be used - - Some functions like fmt.Errorf return a result and have no side effects, - so it is always a mistake to discard the result. This analyzer reports - calls to certain functions in which the result of the call is ignored. - - The set of functions may be controlled using flags. The Analyzer type has more fields besides those shown above: type Analyzer struct { - Name string - Doc string - Flags flag.FlagSet - Run func(*Pass) (interface{}, error) - RunDespiteErrors bool - ResultType reflect.Type - Requires []*Analyzer - FactTypes []Fact + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact } The Flags field declares a set of named (global) flag variables that @@ -154,13 +121,13 @@ package being analyzed, and provides operations to the Run function for reporting diagnostics and other information back to the driver. 
type Pass struct { - Fset *token.FileSet - Files []*ast.File - OtherFiles []string - Pkg *types.Package - TypesInfo *types.Info - ResultOf map[*Analyzer]interface{} - Report func(Diagnostic) + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) ... } @@ -245,7 +212,7 @@ package. An Analyzer that uses facts must declare their types: var Analyzer = &analysis.Analyzer{ - Name: "printf", + Name: "printf", FactTypes: []analysis.Fact{new(isWrapper)}, ... } @@ -330,7 +297,5 @@ entirety as: A tool that provides multiple analyzers can use multichecker in a similar way, giving it the list of Analyzers. - - */ package analysis diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 3e4b19536..2087ceec9 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del // We deleted an entry but now there may be // a blank line-sized hole where the import was. - if line-lastLine > 1 { + if line-lastLine > 1 || !gen.Rparen.IsValid() { // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. // Do nothing. } else if line != fset.File(gen.Rparen).LineCount() { // There was no blank line. Close the hole. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index ddbdd3f08..3084508b5 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -90,7 +90,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. -func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) { +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] @@ -114,7 +114,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prun // supplies each call to f an additional argument, the current // traversal stack. The stack's first element is the outermost node, // an *ast.File; its last is the innermost, n. -func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) { +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node for i := 0; i < len(in.events); { diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 9cf186605..8dcd8bbb7 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) { // PackageId = string_lit . 
// -func (p *parser) parsePackageId() string { +func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { p.error(err) @@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string { // func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') - id = p.parsePackageId() + id = p.parsePackageID() p.expect('.') // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. if p.tok == '?' { @@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { // Complete requires the type's embedded interfaces to be fully defined, // but we do not define any - return types.NewInterface(methods, nil).Complete() + return newInterface(methods, nil).Complete() } // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . @@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type { func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) + p.getPkg(p.parsePackageID(), name) } // int_lit = [ "+" | "-" ] { "0" ... "9" } . diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3799f8ed8..4bfe28a51 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail -reported about the loaded packages, with each mode returning all the data of the -previous mode with some extra added. See the documentation for type LoadMode +reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 6ac3e4f5b..8c8473fd0 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver { cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) - } if err := cmd.Run(); err != nil { return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) + } + var response driverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 648e36431..fc0b28ecf 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -6,17 +6,17 @@ package packages import ( "bytes" + "context" "encoding/json" "fmt" "go/types" - "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "reflect" - "regexp" + "sort" "strconv" "strings" "sync" @@ -24,9 +24,6 @@ import ( "unicode" "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/semver" - "golang.org/x/tools/internal/span" ) // debug controls verbose logging. 
@@ -45,16 +42,21 @@ type responseDeduper struct { dr *driverResponse } -// init fills in r with a driverResponse. -func (r *responseDeduper) init(dr *driverResponse) { - r.dr = dr - r.seenRoots = map[string]bool{} - r.seenPackages = map[string]*Package{} +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { for _, pkg := range dr.Packages { - r.seenPackages[pkg.ID] = pkg + r.addPackage(pkg) } for _, root := range dr.Roots { - r.seenRoots[root] = true + r.addRoot(root) } } @@ -74,25 +76,47 @@ func (r *responseDeduper) addRoot(id string) { r.dr.Roots = append(r.dr.Roots, id) } -// goInfo contains global information from the go tool. -type goInfo struct { - rootDirs map[string]string - env goEnv +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool } -type goEnv struct { - modulesOn bool +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError } -func determineEnv(cfg *Config) goEnv { - buf, err := invokeGo(cfg, "env", "GOMOD") +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() if err != nil { - return goEnv{} + panic(fmt.Sprintf("mustGetEnv: %v", err)) } - gomod := bytes.TrimSpace(buf.Bytes()) - - env := goEnv{} - env.modulesOn = len(gomod) > 0 return env } @@ -100,47 +124,38 @@ func determineEnv(cfg *Config) goEnv { // the build system package structure. // See driver for more details. func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - var sizes types.Sizes + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - sizes, sizeserr = getSizes(cfg) + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) + // types.SizesFor always returns nil or a *types.StdSizes. 
+ response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - defer sizeswg.Wait() - // start fetching rootDirs - var info goInfo - var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) - go func() { - info.rootDirs = determineRootDirs(cfg) - close(rootDirsReady) - }() - go func() { - info.env = determineEnv(cfg) - close(envReady) - }() - getGoInfo := func() *goInfo { - <-rootDirsReady - <-envReady - return &info - } - - // Ensure that we don't leak goroutines: Load is synchronous, so callers will - // not expect it to access the fields of cfg after the call returns. - defer getGoInfo() - - // always pass getGoInfo to golistDriver - golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { - return golistDriver(cfg, getGoInfo, patterns...) + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, } // Determine files requested in contains patterns var containFiles []string - var packagesNamed []string restPatterns := make([]string, 0, len(patterns)) // Extract file= and other [querytype]= patterns. Report an error if querytype // doesn't exist. @@ -156,8 +171,6 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "iamashamedtousethedisabledqueryname": - packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) default: @@ -173,52 +186,34 @@ extractQueries: } } - response := &responseDeduper{} - var err error - // See if we have any patterns to pass through to go list. Zero initial // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := golistDriver(cfg, restPatterns...) + dr, err := state.createDriverResponse(restPatterns...) if err != nil { return nil, err } - response.init(dr) - } else { - response.init(&driverResponse{}) + response.addAll(dr) } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } - // types.SizesFor always returns nil or a *types.StdSizes - response.dr.Sizes, _ = sizes.(*types.StdSizes) - - var containsCandidates []string - if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { + if err := state.runContainsQueries(response, containFiles); err != nil { return nil, err } } - if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { - return nil, err - } - } - - modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) if err != nil { return nil, err } + + var containsCandidates []string if len(containFiles) > 0 { containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { return nil, err } // Check candidate packages for containFiles. 
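For context on the contains-query path being reworked above: runContainsQueries is reached whenever a caller passes a file= pattern to packages.Load. A small usage sketch; the file path is hypothetical and error handling is kept minimal:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	// "file=" asks the driver for the package that contains this file;
	// if go list cannot place it, an ad-hoc package may be synthesized.
	pkgs, err := packages.Load(cfg, "file=/home/user/project/main.go")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.PkgPath, p.GoFiles)
	}
}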
@@ -247,33 +242,32 @@ extractQueries: } } + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } return response.dr, nil } -func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { if len(pkgs) == 0 { return nil } - drivercfg := *cfg - if getGoInfo().env.modulesOn { - drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") - } - dr, err := driver(&drivercfg, pkgs...) - + dr, err := state.createDriverResponse(pkgs...) if err != nil { return err } for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + _, needPkgs, err := state.processGolistOverlay(response) if err != nil { return err } - return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) + return state.addNeededOverlayPackages(response, needPkgs) } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -283,42 +277,16 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - dirResponse, err := driver(cfg, pattern) - if err != nil { + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { - return err // return the original error - } - } - // `go list` can report errors for files that are not listed as part of a package's GoFiles. - // In the case of an invalid Go file, we should assume that it is part of package if only - // one package is in the response. The file may have valid contents in an overlay. - if len(dirResponse.Packages) == 1 { - pkg := dirResponse.Packages[0] - for i, err := range pkg.Errors { - s := errorSpan(err) - if !s.IsValid() { - break - } - if len(pkg.CompiledGoFiles) == 0 { - break - } - dir := filepath.Dir(pkg.CompiledGoFiles[0]) - filename := filepath.Join(dir, filepath.Base(s.URI().Filename())) - if info, err := os.Stat(filename); err != nil || info.IsDir() { - break - } - if !contains(pkg.CompiledGoFiles, filename) { - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) - pkg.GoFiles = append(pkg.GoFiles, filename) - pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...) - } - } - } - // A final attempt to construct an ad-hoc package. 
- if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 { - var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { return err // return the original error } } @@ -347,345 +315,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q return nil } -// adHocPackage attempts to construct an ad-hoc package given a query that failed. -func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) { - // There was an error loading the package. Try to load the file as an ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're in modules mode - // and the ad-hoc is located outside a module. - dirResponse, err := driver(cfg, query) +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) if err != nil { return nil, err } - // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. - if len(dirResponse.Packages) == 0 && err == nil { - dirResponse.Packages = append(dirResponse.Packages, &Package{ + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ ID: "command-line-arguments", PkgPath: query, GoFiles: []string{query}, CompiledGoFiles: []string{query}, Imports: make(map[string]*Package), }) - dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") + response.Roots = append(response.Roots, "command-line-arguments") } - // Special case to handle issue #33482: - // If this is a file= query for ad-hoc packages where the file only exists on an overlay, - // and exists outside of a module, add the file in for the package. - if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { - if len(dirResponse.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range cfg.Overlay { - if path == filename { - dirResponse.Packages[0].Errors = nil - dirResponse.Packages[0].GoFiles = []string{path} - dirResponse.Packages[0].CompiledGoFiles = []string{path} + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? 
+ for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } } } } } - return dirResponse, nil -} - -func contains(files []string, filename string) bool { - for _, f := range files { - if f == filename { - return true - } - } - return false -} - -// errorSpan attempts to parse a standard `go list` error message -// by stripping off the trailing error message. -// -// It works only on errors whose message is prefixed by colon, -// followed by a space (": "). For example: -// -// attributes.go:13:1: expected 'package', found 'type' -// -func errorSpan(err Error) span.Span { - if err.Pos == "" { - input := strings.TrimSpace(err.Msg) - msgIndex := strings.Index(input, ": ") - if msgIndex < 0 { - return span.Parse(input) - } - return span.Parse(input[:msgIndex]) - } - return span.Parse(err.Pos) -} - -// modCacheRegexp splits a path in a module cache into module, module version, and package. -var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) - -func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { - // calling `go env` isn't free; bail out if there's nothing to do. - if len(queries) == 0 { - return nil - } - // Determine which directories are relevant to scan. - roots, modRoot, err := roots(cfg) - if err != nil { - return err - } - - // Scan the selected directories. Simple matches, from GOPATH/GOROOT - // or the local module, can simply be "go list"ed. Matches from the - // module cache need special treatment. - var matchesMu sync.Mutex - var simpleMatches, modCacheMatches []string - add := func(root gopathwalk.Root, dir string) { - // Walk calls this concurrently; protect the result slices. - matchesMu.Lock() - defer matchesMu.Unlock() - - path := dir - if dir != root.Path { - path = dir[len(root.Path)+1:] - } - if pathMatchesQueries(path, queries) { - switch root.Type { - case gopathwalk.RootModuleCache: - modCacheMatches = append(modCacheMatches, path) - case gopathwalk.RootCurrentModule: - // We'd need to read go.mod to find the full - // import path. Relative's easier. - rel, err := filepath.Rel(cfg.Dir, dir) - if err != nil { - // This ought to be impossible, since - // we found dir in the current module. - panic(err) - } - simpleMatches = append(simpleMatches, "./"+rel) - case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: - simpleMatches = append(simpleMatches, path) - } - } - } - - startWalk := time.Now() - gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) - cfg.Logf("%v for walk", time.Since(startWalk)) - - // Weird special case: the top-level package in a module will be in - // whatever directory the user checked the repository out into. It's - // more reasonable for that to not match the package name. So, if there - // are any Go files in the mod root, query it just to be safe. - if modRoot != "" { - rel, err := filepath.Rel(cfg.Dir, modRoot) - if err != nil { - panic(err) // See above. - } - - files, err := ioutil.ReadDir(modRoot) - if err != nil { - panic(err) // See above. 
- } - - for _, f := range files { - if strings.HasSuffix(f.Name(), ".go") { - simpleMatches = append(simpleMatches, rel) - break - } - } - } - - addResponse := func(r *driverResponse) { - for _, pkg := range r.Packages { - response.addPackage(pkg) - for _, name := range queries { - if pkg.Name == name { - response.addRoot(pkg.ID) - break - } - } - } - } - - if len(simpleMatches) != 0 { - resp, err := driver(cfg, simpleMatches...) - if err != nil { - return err - } - addResponse(resp) - } - - // Module cache matches are tricky. We want to avoid downloading new - // versions of things, so we need to use the ones present in the cache. - // go list doesn't accept version specifiers, so we have to write out a - // temporary module, and do the list in that module. - if len(modCacheMatches) != 0 { - // Collect all the matches, deduplicating by major version - // and preferring the newest. - type modInfo struct { - mod string - major string - } - mods := make(map[modInfo]string) - var imports []string - for _, modPath := range modCacheMatches { - matches := modCacheRegexp.FindStringSubmatch(modPath) - mod, ver := filepath.ToSlash(matches[1]), matches[2] - importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) - - major := semver.Major(ver) - if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { - mods[modInfo{mod, major}] = ver - } - - imports = append(imports, importPath) - } - - // Build the temporary module. - var gomod bytes.Buffer - gomod.WriteString("module modquery\nrequire (\n") - for mod, version := range mods { - gomod.WriteString("\t" + mod.mod + " " + version + "\n") - } - gomod.WriteString(")\n") - - tmpCfg := *cfg - - // We're only trying to look at stuff in the module cache, so - // disable the network. This should speed things up, and has - // prevented errors in at least one case, #28518. - tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) - - var err error - tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") - if err != nil { - return err - } - defer os.RemoveAll(tmpCfg.Dir) - - if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { - return fmt.Errorf("writing go.mod for module cache query: %v", err) - } - - // Run the query, using the import paths calculated from the matches above. - resp, err := driver(&tmpCfg, imports...) - if err != nil { - return fmt.Errorf("querying module cache matches: %v", err) - } - addResponse(resp) - } - - return nil -} - -func getSizes(cfg *Config) (types.Sizes, error) { - return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) -} - -// roots selects the appropriate paths to walk based on the passed-in configuration, -// particularly the environment and the presence of a go.mod in cfg.Dir's parents. -func roots(cfg *Config) ([]gopathwalk.Root, string, error) { - stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") - if err != nil { - return nil, "", err - } - - fields := strings.Split(stdout.String(), "\n") - if len(fields) != 4 || len(fields[3]) != 0 { - return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) - } - goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] - var modDir string - if gomod != "" { - modDir = filepath.Dir(gomod) - } - - var roots []gopathwalk.Root - // Always add GOROOT. 
- roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(goroot, "/src"), - Type: gopathwalk.RootGOROOT, - }) - // If modules are enabled, scan the module dir. - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: modDir, - Type: gopathwalk.RootCurrentModule, - }) - } - // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. - for _, p := range gopath { - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/pkg/mod"), - Type: gopathwalk.RootModuleCache, - }) - } else { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/src"), - Type: gopathwalk.RootGOPATH, - }) - } - } - - return roots, modDir, nil -} - -// These functions were copied from goimports. See further documentation there. - -// pathMatchesQueries is adapted from pkgIsCandidate. -// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? -func pathMatchesQueries(path string, queries []string) bool { - lastTwo := lastTwoComponents(path) - for _, query := range queries { - if strings.Contains(lastTwo, query) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, query) { - return true - } - } - } - return false -} - -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { - nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { - nslash++ - if nslash == 2 { - return v[i:] - } - } - } - return v -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) + return response, nil } // Fields must match go list; @@ -730,10 +400,9 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriver uses the "go list" command to expand the pattern -// words and return metadata for the specified packages. dir may be -// "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -747,11 +416,13 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // Run "go list" for complete // information on the specified packages. - buf, err := invokeGo(cfg, golistargs(cfg, words)...) + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) if err != nil { return nil, err } seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. 
var response driverResponse for dec := json.NewDecoder(buf); dec.More(); { @@ -782,18 +453,72 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // contained in a known module or GOPATH entry. This will allow the package to be // properly "reclaimed" when overlays are processed. if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } if ok { p.ImportPath = pkgPath } } if old, found := seen[p.ImportPath]; found { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue } - // skip the duplicate - continue + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2] + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
} seen[p.ImportPath] = p @@ -803,6 +528,7 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + forTest: p.ForTest, } // Work around https://golang.org/issue/28749: @@ -879,35 +605,49 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv } if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, }) } + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { response.Packages = append(response.Packages, pkg) } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) return &response, nil } // getPkgPath finds the package path of a directory if it's relative to a root directory. -func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { +func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { - cfg.Logf("error getting absolute path of %s: %v", dir, err) - return "", false + return "", false, err } - for rdir, rpath := range goInfo().rootDirs { - absRdir, err := filepath.Abs(rdir) - if err != nil { - cfg.Logf("error getting absolute path of %s: %v", rdir, err) - continue - } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, absRdir) { - cfg.Logf("%s does not have prefix %s", absDir, absRdir) + if !strings.HasPrefix(absDir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -922,11 +662,11 @@ func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { // Once the file is saved, gopls, or the next invocation of the tool will get the correct // result straight from golist. // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true + return path.Join(rpath, filepath.ToSlash(r)), true, nil } - return filepath.ToSlash(r), true + return filepath.ToSlash(r), true, nil } - return "", false + return "", false, nil } // absJoin absolutizes and flattens the lists of files. 
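getPkgPath above maps an absolute directory to an import path by finding a root directory that prefixes it and joining the remainder onto that root's import-path prefix. A simplified, standalone sketch of the same lookup; the rootDirs contents are invented for illustration, and the real method derives its roots from go list -m or GOPATH and notes that symlinks are not handled:

package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// pkgPathUnder maps an absolute directory to an import path using a
// root-directory -> import-path-prefix table, mirroring the idea of
// getPkgPath in the diff (a simplified illustration, not the library code).
func pkgPathUnder(absDir string, rootDirs map[string]string) (string, bool) {
	for rdir, rpath := range rootDirs {
		if !strings.HasPrefix(absDir, rdir) {
			continue
		}
		rel, err := filepath.Rel(rdir, absDir)
		if err != nil {
			continue
		}
		if rpath != "" {
			return path.Join(rpath, filepath.ToSlash(rel)), true
		}
		return filepath.ToSlash(rel), true
	}
	return "", false
}

func main() {
	// Invented module root and directory, purely for the example.
	roots := map[string]string{"/home/user/mod": "example.com/mod"}
	fmt.Println(pkgPathUnder("/home/user/mod/internal/util", roots))
	// Prints: example.com/mod/internal/util true
}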
@@ -945,8 +685,8 @@ func absJoin(dir string, fileses ...[]string) (res []string) { func golistargs(cfg *Config, words []string) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), @@ -961,10 +701,17 @@ func golistargs(cfg *Config, words []string) []string { } // invokeGo returns the stdout of a go command invocation. -func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, "go", args...) + goArgs := []string{verb} + if verb != "env" { + goArgs = append(goArgs, cfg.BuildFlags...) + } + goArgs = append(goArgs, args...) + cmd := exec.CommandContext(state.ctx, "go", goArgs...) // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -976,7 +723,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { cmd.Stdout = stdout cmd.Stderr = stderr defer func(start time.Time) { - cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) + cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, goArgs...), stderr, stdout) }(time.Now()) if err := cmd.Run(); err != nil { @@ -1016,7 +763,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + msg := stderr.String()[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { return stdout, nil } } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index a7de62299..7974a6c9b 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,12 +1,13 @@ package packages import ( - "bytes" "encoding/json" "fmt" "go/parser" "go/token" + "os" "path/filepath" + "sort" "strconv" "strings" ) @@ -16,7 +17,7 @@ import ( // sometimes incorrect. 
// TODO(matloob): Handle unsupported cases, including the following: // - determining the correct package to add given a new import path -func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { havePkgs := make(map[string]string) // importPath -> non-test package ID needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) @@ -34,7 +35,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( // potentially modifying the transitive set of dependencies). var overlayAddsImports bool - for opath, contents := range cfg.Overlay { + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] base := filepath.Base(opath) dir := filepath.Dir(opath) var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant @@ -64,14 +81,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( testVariantOf = p continue nextPackage } + // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // If we've already seen the test variant, - // make sure to label which package it is a test variant of. - if hasTestFiles(pkg) { - testVariantOf = p - continue nextPackage - } - // If we have already seen the package of which this is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -86,7 +97,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if pkg == nil { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. - pkgPath, ok := getPkgPath(cfg, dir, rootDirs) + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } if !ok { break } @@ -114,6 +128,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if isTestFile && !isXTest && testVariantOf != nil { pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } } } } @@ -130,42 +149,45 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( continue } for _, imp := range imports { - _, found := pkg.Imports[imp] - if !found { - overlayAddsImports = true - // TODO(matloob): Handle cases when the following block isn't correct. - // These include imports of vendored packages, etc. 
- id, ok := havePkgs[imp] - if !ok { - id = imp - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as wel. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err } } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } } - continue } - // toPkgPath tries to guess the package path given the id. - // This isn't always correct -- it's certainly wrong for - // vendored packages' paths. - toPkgPath := func(id string) string { - // TODO(matloob): Handle vendor paths. - i := strings.IndexByte(id, ' ') - if i >= 0 { - return id[:i] + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) } - return id + return state.resolveImport(sourceDir, id) } - // Do another pass now that new packages have been created to determine the - // set of missing packages. + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. for _, pkg := range response.dr.Packages { for _, imp := range pkg.Imports { - pkgPath := toPkgPath(imp.ID) + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } if _, ok := havePkgs[pkgPath]; !ok { needPkgsSet[pkgPath] = true } @@ -185,6 +207,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( return modifiedPkgs, needPkgs, err } +// resolveImport finds the the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. 
+ next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + func hasTestFiles(p *Package) bool { for _, f := range p.GoFiles { if strings.HasSuffix(f, "_test.go") { @@ -194,44 +262,59 @@ func hasTestFiles(p *Package) bool { return false } -// determineRootDirs returns a mapping from directories code can be contained in to the -// corresponding import path prefixes of those directories. -// Its result is used to try to determine the import path for a package containing -// an overlay file. -func determineRootDirs(cfg *Config) map[string]string { - // Assume modules first: - out, err := invokeGo(cfg, "list", "-m", "-json", "all") +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() if err != nil { - return determineRootDirsGOPATH(cfg) + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + return nil, err } m := map[string]string{} type jsonMod struct{ Path, Dir string } for dec := json.NewDecoder(out); dec.More(); { mod := new(jsonMod) if err := dec.Decode(mod); err != nil { - return m // Give up and return an empty map. Package won't be found for overlay. + return nil, err } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. - m[mod.Dir] = mod.Path + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + m[absDir] = mod.Path } } - return m + return m, nil } -func determineRootDirsGOPATH(cfg *Config) map[string]string { +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { m := map[string]string{} - out, err := invokeGo(cfg, "env", "GOPATH") - if err != nil { - // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. - // When we try to find the import path for a directory, there will be no root-dir match and - // we'll give up. - return m + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" } - for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { - m[filepath.Join(p, "src")] = "" - } - return m + return m, nil } func extractImports(filename string, contents []byte) ([]string, error) { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 050cca43a..586c714f6 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -23,6 +23,7 @@ import ( "sync" "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/packagesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -34,6 +35,9 @@ import ( // Load may return more information than requested. 
type LoadMode int +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -51,7 +55,7 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportsFile. + // NeedExportsFile adds ExportFile. NeedExportsFile // NeedTypes adds Types, Fset, and IllTyped. @@ -160,7 +164,7 @@ type Config struct { Tests bool // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the + // If the file with the given path already exists, the parser will use the // alternative file contents provided by the map. // // Overlays provide incomplete support for when a given file doesn't @@ -292,6 +296,15 @@ type Package struct { // TypesSizes provides the effective size function for types in TypesInfo. TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } } // An Error describes a problem with a package's metadata, syntax, or types. @@ -500,12 +513,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if i, found := rootMap[pkg.ID]; found { rootIndex = i } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. + (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" lpkg := &loaderPackage{ Package: pkg, - needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0, - needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || - len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files - pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + needtypes: needtypes, + needsrc: needsrc, } ld.pkgs[lpkg.ID] = lpkg if rootIndex >= 0 { @@ -713,7 +737,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // which would then require that such created packages be explicitly // inserted back into the Import graph as a final step after export data loading. // The Diamond test exercises this case. 
- if !lpkg.needtypes { + if !lpkg.needtypes && !lpkg.needsrc { return } if !lpkg.needsrc { diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 000000000..b4f428767 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,67 @@ +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports // import "golang.org/x/tools/imports" + +import ( + "go/build" + "os" + + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data ``as if'' it were in filename, pass the data as a non-nil src. +func Process(filename string, src []byte, opt *Options) ([]byte, error) { + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), + Debug: Debug, + LocalPrefix: LocalPrefix, + }, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go index 7219c8e9f..9887f7e7a 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -14,14 +14,14 @@ import ( "sync" ) -// TraverseLink is used as a return value from WalkFuncs to indicate that the +// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the // symlink named in the call may be traversed. 
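The new forward.go above is a thin shim over the internal imports implementation; its exported entry point is used like this (file name and source are illustrative):

package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc main() { fmt.Println(\"hi\") }\n")
	// A nil *Options selects the defaults noted in forward.go:
	// Comments: true, TabIndent: true, TabWidth: 8.
	out, err := imports.Process("demo.go", src, nil)
	if err != nil {
		fmt.Println("goimports failed:", err)
		return
	}
	fmt.Print(string(out))
}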
-var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") +var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") -// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the // callback should not be called for any other files in the current directory. // Child directories will still be traversed. -var SkipFiles = errors.New("fastwalk: skip remaining files in directory") +var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") // Walk is a faster implementation of filepath.Walk. // @@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { err := w.fn(joined, typ) if typ == os.ModeSymlink { - if err == TraverseLink { + if err == ErrTraverseLink { // Set callbackDone so we don't call it twice for both the // symlink-as-symlink and the symlink-as-directory later: w.enqueue(walkItem{dir: joined, callbackDone: true}) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go index a906b8759..b0d6327a9 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go index 3369b1a0b..ce38fdcf8 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, name, typ); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 9a61bdbf5..64309db74 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -77,6 +77,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root } } +// walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if _, err := os.Stat(root.Path); os.IsNotExist(err) { if opts.Debug { @@ -114,7 +115,7 @@ type walker struct { ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. } -// init initializes the walker based on its Options. +// init initializes the walker based on its Options func (w *walker) init() { var ignoredPaths []string if w.root.Type == RootModuleCache { @@ -167,6 +168,7 @@ func (w *walker) getIgnoredDirs(path string) []string { return ignoredDirs } +// shouldSkipDir reports whether the file should be skipped or not. 
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { for _, ignoredDir := range w.ignoredDirs { if os.SameFile(fi, ignoredDir) { @@ -180,20 +182,21 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { return false } +// walk walks through the given path. func (w *walker) walk(path string, typ os.FileMode) error { dir := filepath.Dir(path) if typ.IsRegular() { if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if !strings.HasSuffix(path, ".go") { return nil } w.add(w.root, dir) - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if typ == os.ModeDir { base := filepath.Base(path) @@ -221,7 +224,7 @@ func (w *walker) walk(path string, typ os.FileMode) error { return nil } if w.shouldTraverse(dir, fi) { - return fastwalk.TraverseLink + return fastwalk.ErrTraverseLink } } return nil diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index f531024da..f95d0f440 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,7 +27,6 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/gopathwalk" ) @@ -82,7 +81,8 @@ type ImportFix struct { // IdentName is the identifier that this fix will add or remove. IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). - FixType ImportFixType + FixType ImportFixType + Relevance int // see pkg } // An ImportInfo represents a single import statement. @@ -537,7 +537,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. - p := &pass{fset: fset, f: f, srcDir: srcDir} + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} if fixes, done := p.load(); done { return fixes, nil } @@ -559,8 +559,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv } // Third pass: get real package names where we had previously used - // the naive algorithm. This is the first step that will use the - // environment, so we provide it here for the first time. + // the naive algorithm. p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} p.loadRealPackageNames = true p.otherFiles = otherFiles @@ -585,62 +584,86 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// getCandidatePkgs returns the list of pkgs that are accessible from filename, -// optionall filtered to only packages named pkgName. -func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) { - // TODO(heschi): filter out current package. (Don't forget x_test can import x.) +// Highest relevance, used for the standard library. Chosen arbitrarily to +// match pre-existing gopls code. +const MaxRelevance = 7 - var result []*pkg +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. 
+func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } // Start off with the standard library. - for importPath := range stdlib { - if pkgName != "" && path.Base(importPath) != pkgName { - continue - } - result = append(result, &pkg{ + for importPath, exports := range stdlib { + p := &pkg{ dir: filepath.Join(env.GOROOT, "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), - relevance: 0, - }) - } - - // Exclude goroot results -- getting them is relatively expensive, not cached, - // and generally redundant with the in-memory version. - exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT} - // Only the go/packages resolver uses the first argument, and nobody uses that resolver. - scannedPkgs, err := env.GetResolver().scan(nil, true, exclude) - if err != nil { - return nil, err + relevance: MaxRelevance, + } + if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } } + var mu sync.Mutex dupCheck := map[string]struct{}{} - for _, pkg := range scannedPkgs { - if pkgName != "" && pkg.packageName != pkgName { - continue - } - if !canUse(filename, pkg.dir) { - continue - } - if _, ok := dupCheck[pkg.importPathShort]; ok { - continue - } - dupCheck[pkg.importPathShort] = struct{}{} - result = append(result, pkg) + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, } + return env.GetResolver().scan(ctx, scanFilter) +} - // Sort first by relevance, then by package name, with import path as a tiebreaker. - sort.Slice(result, func(i, j int) bool { - pi, pj := result[i], result[j] - if pi.relevance != pj.relevance { - return pi.relevance < pj.relevance - } - if pi.packageName != pj.packageName { - return pi.packageName < pj.packageName - } - return pi.importPathShort < pj.importPathShort - }) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { + result := make(map[string]int) + for _, path := range paths { + result[path] = env.GetResolver().scoreImportPath(ctx, path) + } + return result +} - return result, nil +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. 
+ callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) } func candidateImportName(pkg *pkg) string { @@ -651,23 +674,37 @@ func candidateImportName(pkg *pkg) string { } // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { - pkgs, err := getCandidatePkgs("", filename, env) - if err != nil { - return nil, err +func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, } - result := make([]ImportFix, 0, len(pkgs)) - for _, pkg := range pkgs { - result = append(result, ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - }) - } - return result, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // A PackageExport is a package and its exports. 
@@ -676,42 +713,34 @@ type PackageExport struct {
 	Exports []string
 }
 
-func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) {
-	pkgs, err := getCandidatePkgs(completePackage, filename, env)
-	if err != nil {
-		return nil, err
+func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
+	callback := &scanCallback{
+		rootFound: func(gopathwalk.Root) bool {
+			return true
+		},
+		dirFound: func(pkg *pkg) bool {
+			return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
+		},
+		packageNameLoaded: func(pkg *pkg) bool {
+			return pkg.packageName == searchPkg
+		},
+		exportsLoaded: func(pkg *pkg, exports []string) {
+			sort.Strings(exports)
+			wrapped(PackageExport{
+				Fix: &ImportFix{
+					StmtInfo: ImportInfo{
+						ImportPath: pkg.importPathShort,
+						Name:       candidateImportName(pkg),
+					},
+					IdentName: pkg.packageName,
+					FixType:   AddImport,
+					Relevance: pkg.relevance,
+				},
+				Exports: exports,
+			})
+		},
 	}
-
-	results := make([]PackageExport, 0, len(pkgs))
-	for _, pkg := range pkgs {
-		fix := &ImportFix{
-			StmtInfo: ImportInfo{
-				ImportPath: pkg.importPathShort,
-				Name:       candidateImportName(pkg),
-			},
-			IdentName: pkg.packageName,
-			FixType:   AddImport,
-		}
-		var exports []string
-		if e, ok := stdlib[pkg.importPathShort]; ok {
-			exports = e
-		} else {
-			exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg)
-			if err != nil {
-				if env.Debug {
-					env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err)
-				}
-				continue
-			}
-		}
-		sort.Strings(exports)
-		results = append(results, PackageExport{
-			Fix:     fix,
-			Exports: exports,
-		})
-	}
-
-	return results, nil
+	return getCandidatePkgs(ctx, callback, filename, filePkg, env)
 }
 
 // ProcessEnv contains environment variables and settings that affect the use of
@@ -725,15 +754,19 @@ type ProcessEnv struct {
 	GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
 	WorkingDir string
 
-	// If true, use go/packages regardless of the environment.
-	ForceGoPackages bool
-
 	// Logf is the default logger for the ProcessEnv.
 	Logf func(format string, args ...interface{})
 
 	resolver Resolver
 }
 
+// CopyConfig copies the env's configuration into a new env.
+func (e *ProcessEnv) CopyConfig() *ProcessEnv {
+	copy := *e
+	copy.resolver = nil
+	return &copy
+}
+
 func (e *ProcessEnv) env() []string {
 	env := os.Environ()
 	add := func(k, v string) {
@@ -757,39 +790,34 @@ func (e *ProcessEnv) GetResolver() Resolver {
 	if e.resolver != nil {
 		return e.resolver
 	}
-	if e.ForceGoPackages {
-		e.resolver = &goPackagesResolver{env: e}
-		return e.resolver
-	}
-
 	out, err := e.invokeGo("env", "GOMOD")
 	if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
-		e.resolver = &gopathResolver{env: e}
+		e.resolver = newGopathResolver(e)
		return e.resolver
 	}
-	e.resolver = &ModuleResolver{env: e}
+	e.resolver = newModuleResolver(e)
 	return e.resolver
 }
 
-func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
-	return &packages.Config{
-		Mode: mode,
-		Dir:  e.WorkingDir,
-		Env:  e.env(),
-	}
-}
-
 func (e *ProcessEnv) buildContext() *build.Context {
 	ctx := build.Default
 	ctx.GOROOT = e.GOROOT
 	ctx.GOPATH = e.GOPATH
 
-	// As of Go 1.14, build.Context has a WorkingDir field
+	// As of Go 1.14, build.Context has a Dir field
 	// (see golang.org/issue/34860).
 	// Populate it only if present.
- if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String { - wd.SetString(e.WorkingDir) + rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if !dir.IsValid() { + // Working drafts of Go 1.14 named the field "WorkingDir" instead. + // TODO(bcmills): Remove this case after the Go 1.14 beta has been released. + dir = rc.FieldByName("WorkingDir") } + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) + } + return &ctx } @@ -824,6 +852,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + return + } exports := copyExports(stdlib[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, @@ -848,94 +880,65 @@ func addStdlibCandidates(pass *pass, refs references) { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) - // scan finds (at least) the packages satisfying refs. If loadNames is true, - // package names will be set on the results, and dirs whose package name - // could not be determined will be excluded. - scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. - loadExports(ctx context.Context, pkg *pkg) (string, []string, error) + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) int ClearForNewScan() } -// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. -type goPackagesResolver struct { - env *ProcessEnv -} - -func (r *goPackagesResolver) ClearForNewScan() {} - -func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if len(importPaths) == 0 { - return nil, nil - } - cfg := r.env.newPackagesConfig(packages.LoadFiles) - pkgs, err := packages.Load(cfg, importPaths...) - if err != nil { - return nil, err - } - names := map[string]string{} - for _, pkg := range pkgs { - names[VendorlessPath(pkg.PkgPath)] = pkg.Name - } - // We may not have found all the packages. Guess the rest. - for _, path := range importPaths { - if _, ok := names[path]; ok { - continue - } - names[path] = ImportPathToAssumedName(path) - } - return names, nil - -} - -func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) { - var loadQueries []string - for pkgName := range refs { - loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) - } - sort.Strings(loadQueries) - cfg := r.env.newPackagesConfig(packages.LoadFiles) - goPackages, err := packages.Load(cfg, loadQueries...) 
- if err != nil { - return nil, err - } - - var scan []*pkg - for _, goPackage := range goPackages { - scan = append(scan, &pkg{ - dir: filepath.Dir(goPackage.CompiledGoFiles[0]), - importPathShort: VendorlessPath(goPackage.PkgPath), - goPackage: goPackage, - packageName: goPackage.Name, - }) - } - return scan, nil -} - -func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - if pkg.goPackage == nil { - return "", nil, fmt.Errorf("goPackage not set") - } - var exports []string - fset := token.NewFileSet() - for _, fname := range pkg.goPackage.CompiledGoFiles { - f, err := parser.ParseFile(fset, fname, nil, 0) - if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fname, err) - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports = append(exports, name) - } - } - } - return pkg.goPackage.Name, exports, nil +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) } func addExternalCandidates(pass *pass, refs references, filename string) error { - dirScan, err := pass.env.GetResolver().scan(refs, false, nil) + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + err := pass.env.GetResolver().scan(context.Background(), callback) if err != nil { return err } @@ -962,7 +965,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) if err != nil { firstErrOnce.Do(func() { @@ -1033,24 +1036,36 @@ func ImportPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. 
type gopathResolver struct { - env *ProcessEnv - cache *dirInfoCache + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. } -func (r *gopathResolver) init() { - if r.cache == nil { - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - } +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), } + r.scanSema <- struct{}{} + return r } func (r *gopathResolver) ClearForNewScan() { - r.cache = nil + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - r.init() names := map[string]string{} for _, path := range importPaths { names[path] = importPathToName(r.env, path, srcDir) @@ -1130,7 +1145,6 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - goPackage *packages.Package dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") importPathShort string // vendorless import path ("net/http", "a/b") packageName string // package name loaded from source if requested @@ -1178,8 +1192,7 @@ func distance(basepath, targetpath string) int { return strings.Count(p, string(filepath.Separator)) + 1 } -func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { - r.init() +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { add := func(root gopathwalk.Root, dir string) { // We assume cached directories have not changed. We can skip them and their // children. @@ -1196,56 +1209,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk } r.cache.Store(dir, info) } - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude) - gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) - var result []*pkg - for _, dir := range r.cache.Keys() { - info, ok := r.cache.Load(dir) - if !ok { - continue - } - if loadNames { - var err error - info, err = r.cache.CachePackageName(info) - if err != nil { - continue - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return } p := &pkg{ importPathShort: info.nonCanonicalImportPath, - dir: dir, - relevance: 1, - packageName: info.packageName, + dir: info.dir, + relevance: MaxRelevance - 1, } if info.rootType == gopathwalk.RootGOROOT { - p.relevance = 0 + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) } - result = append(result, p) } - return result, nil + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. 
+ roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil } -func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root { +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { var result []gopathwalk.Root -outer: for _, root := range roots { - for _, i := range exclude { - if i == root.Type { - continue outer - } + if !include(root) { + continue } result = append(result, root) } return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - r.init() - if info, ok := r.cache.Load(pkg.dir); ok { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } // VendorlessPath returns the devendorized version of the import path ipath. @@ -1261,7 +1302,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { var exports []string // Look for non-test, buildable .go files which could provide exports. @@ -1272,7 +1313,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str var files []os.FileInfo for _, fi := range all { name := fi.Name() - if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } match, err := env.buildContext().MatchFile(dir, fi.Name()) @@ -1305,6 +1346,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // handled by MatchFile above. continue } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } pkgName = f.Name.Name for name := range f.Scope.Objects { if ast.IsExported(name) { @@ -1323,29 +1368,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { - pkgDir, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - pkgDir = filepath.Dir(pkgDir) - - // Find candidate packages, looking only at their directory names first. 
- var candidates []pkgDistance - for _, pkg := range dirScan { - if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - continue - } - if pkgIsCandidate(filename, pkgName, pkg) { - candidates = append(candidates, pkgDistance{ - pkg: pkg, - distance: distance(pkgDir, pkg.dir), - }) - } - } - +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so @@ -1358,7 +1381,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, } // Collect exports for packages with matching names. - rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1393,7 +1415,9 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg) + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) @@ -1430,17 +1454,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, return nil, nil } -func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) { - pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg) - if err != nil { - return nil, err - } - if expectPkg != pkgName { - return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg) - } - return exports, err -} - // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1453,7 +1466,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false @@ -1471,17 +1484,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { // "bar", which is strongly discouraged // anyway. There's no reason goimports needs // to be slow just to accommodate that. 
- lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) if strings.Contains(lastTwo, pkgIdent) { return true } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } } - return false } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 7c1b47536..2e7a317e5 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/build" @@ -21,6 +22,7 @@ import ( "io" "io/ioutil" "log" + "os" "regexp" "strconv" "strings" @@ -83,8 +85,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, return getFixes(fileSet, file, filename, opt.Env) } -// ApplyFix will apply all of the fixes to the file and format it. -func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { src, opt, err = initialize(filename, src, opt) if err != nil { return nil, err @@ -100,6 +103,8 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) ( if opt.AllErrors { parserMode |= parser.AllErrors } + parserMode |= extraMode + file, err := parser.ParseFile(fileSet, filename, src, parserMode) if file == nil { return nil, err @@ -111,23 +116,23 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) ( return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the standard library candidate packages to import in -// sorted order on import path. -func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { - _, opt, err = initialize(filename, nil, opt) +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. +func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getAllCandidates(filename, opt.Env) + return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) } // GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) { - _, opt, err = initialize(filename, nil, opt) +func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getPackageExports(pkg, filename, opt.Env) + return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) } // initialize sets the values for opt and src. 
@@ -142,8 +147,12 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er // Set the env if the user has not provided it. if opt.Env == nil { opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), } } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 0f9b87eb7..3ae859ed2 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,7 +13,6 @@ import ( "sort" "strconv" "strings" - "sync" "golang.org/x/tools/internal/gopathwalk" "golang.org/x/tools/internal/module" @@ -26,11 +25,14 @@ type ModuleResolver struct { env *ProcessEnv moduleCacheDir string dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool - Initialized bool - Main *ModuleJSON - ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - ModsByDir []*ModuleJSON // ...or Dir. + initialized bool + main *ModuleJSON + modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -41,13 +43,23 @@ type ModuleJSON struct { Path string // module path Replace *ModuleJSON // replaced by this module Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? Dir string // directory holding files for this module, if any GoMod string // path to go.mod file for this module, if any GoVersion string // go version used in module } +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + func (r *ModuleResolver) init() error { - if r.Initialized { + if r.initialized { return nil } mainMod, vendorEnabled, err := vendorEnabled(r.env) @@ -58,13 +70,13 @@ func (r *ModuleResolver) init() error { if mainMod != nil && vendorEnabled { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. - r.Main = mainMod + r.main = mainMod r.dummyVendorMod = &ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. 
r.initAllMods() @@ -72,30 +84,64 @@ func (r *ModuleResolver) init() error { r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") - sort.Slice(r.ModsByModPath, func(i, j int) bool { + sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByModPath[x].Path, "/") + return strings.Count(r.modsByModPath[x].Path, "/") } return count(j) < count(i) // descending order }) - sort.Slice(r.ModsByDir, func(i, j int) bool { + sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, "/") } return count(j) < count(i) // descending order }) + r.roots = []gopathwalk.Root{ + {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. + for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } if r.otherCache == nil { r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } - r.Initialized = true + r.initialized = true return nil } @@ -116,27 +162,35 @@ func (r *ModuleResolver) initAllMods() error { // Can't do anything with a module that's not downloaded. continue } - r.ModsByModPath = append(r.ModsByModPath, mod) - r.ModsByDir = append(r.ModsByDir, mod) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) if mod.Main { - r.Main = mod + r.main = mod } } return nil } func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } + r.scanSema <- struct{}{} } func (r *ModuleResolver) ClearForNewMod() { - env := r.env + <-r.scanSema *r = ModuleResolver{ - env: env, + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, } r.init() + r.scanSema <- struct{}{} } // findPackage returns the module and directory that contains the package at @@ -144,7 +198,7 @@ func (r *ModuleResolver) ClearForNewMod() { func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. 
- for _, m := range r.ModsByModPath { + for _, m := range r.modsByModPath { if !strings.HasPrefix(importPath, m.Path) { continue } @@ -211,7 +265,7 @@ func (r *ModuleResolver) cacheKeys() []string { } // cachePackageName caches the package name for a dir already in the cache. -func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CachePackageName(info) } @@ -238,7 +292,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. - for _, m := range r.ModsByDir { + for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue } @@ -333,41 +387,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( return names, nil } -func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { if err := r.init(); err != nil { - return nil, err + return err } - // Walk GOROOT, GOPATH/pkg/mod, and the main module. - roots := []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - } - if r.Main != nil { - roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) - } - if r.dummyVendorMod != nil { - roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) - } else { - roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) - // Walk replace targets, just in case they're not in any of the above. - for _, mod := range r.ModsByModPath { - if mod.Replace != nil { - roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) } - roots = filterRoots(roots, exclude) + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() - var result []*pkg - var mu sync.Mutex - - // We assume cached directories have not changed. We can skip them and their - // children. + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. skip := func(root gopathwalk.Root, dir string) bool { - mu.Lock() - defer mu.Unlock() - info, ok := r.cacheLoad(dir) if !ok { return false @@ -379,44 +441,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk return packageScanned } - // Add anything new to the cache. We'll process everything in it below. 
+ // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { - mu.Lock() - defer mu.Unlock() - r.cacheStore(r.scanDirForPackage(root, dir)) } - gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) - - // Everything we already had, and everything new, is now in the cache. - for _, dir := range r.cacheKeys() { - info, ok := r.cacheLoad(dir) - if !ok { - continue + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } - // Skip this directory if we were not able to get the package information successfully. - if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - continue - } - - // If we want package names, make sure the cache has them. - if loadNames { - var err error - if info, err = r.cachePackageName(info); err != nil { + if r.scannedRoots[root] { continue } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + r.scannedRoots[root] = true } - - res, err := r.canonicalize(info) - if err != nil { - continue - } - result = append(result, res) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: } + return nil +} - return result, nil +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *ModuleJSON) int { + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + return MaxRelevance - 3 + case !mod.Main: + return MaxRelevance - 2 + default: + return MaxRelevance - 1 // main module ties with stdlib + } } // canonicalize gets the result of canonicalizing the packages using the results @@ -428,15 +510,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { importPathShort: info.nonCanonicalImportPath, dir: info.dir, packageName: path.Base(info.nonCanonicalImportPath), - relevance: 0, + relevance: MaxRelevance, }, nil } importPath := info.nonCanonicalImportPath - relevance := 2 + mod := r.findModuleByDir(info.dir) // Check if the directory is underneath a module that's in scope. - if mod := r.findModuleByDir(info.dir); mod != nil { - relevance = 1 + if mod != nil { // It is. If dir is the target of a replace directive, // our guessed import path is wrong. Use the real one. if mod.Dir == info.dir { @@ -445,15 +526,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { dirInMod := info.dir[len(mod.Dir)+len("/"):] importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) } - } else if info.needsReplace { + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. 
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) } res := &pkg{ importPathShort: importPath, dir: info.dir, - packageName: info.packageName, // may not be populated if the caller didn't ask for it - relevance: relevance, + relevance: modRelevance(mod), } // We may have discovered a package that has a different version // in scope already. Canonicalize to that one if possible. @@ -463,14 +545,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { if err := r.init(); err != nil { return "", nil, err } - if info, ok := r.cacheLoad(pkg.dir); ok { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { @@ -488,7 +570,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } switch root.Type { case gopathwalk.RootCurrentModule: - importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir)) + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) case gopathwalk.RootModuleCache: matches := modCacheRegexp.FindStringSubmatch(subdir) if len(matches) == 0 { @@ -516,7 +598,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir dir: dir, rootType: root.Type, nonCanonicalImportPath: importPath, - needsReplace: false, moduleDir: modDir, moduleName: modName, } @@ -524,14 +605,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir // stdlib packages are always in scope, despite the confusing go.mod return result } - // Check that this package is not obviously impossible to import. - if !strings.HasPrefix(importPath, modName) { - // The module's declared path does not match - // its expected path. It probably needs a - // replace directive we don't have. - result.needsReplace = true - } - return result } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index f6b070a3f..5b4f03acc 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -49,10 +49,6 @@ type directoryPackageInfo struct { // nonCanonicalImportPath is the package's expected import path. It may // not actually be importable at that path. nonCanonicalImportPath string - // needsReplace is true if the nonCanonicalImportPath does not match the - // module's declared path, making it impossible to import without a - // replace directive. - needsReplace bool // Module-related information. moduleDir string // The directory that is the module root of this dir. @@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( type dirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. - dirs map[string]*directoryPackageInfo + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. 
The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop } // Store stores the package info for dir. func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() - defer d.mu.Unlock() - stored := info // defensive copy - d.dirs[dir] = &stored + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } } // Load returns a copy of the directoryPackageInfo for absolute directory dir. @@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { - return info, err + return info.packageName, err } if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - return info, fmt.Errorf("cannot read package name, scan error: %v", err) + return "", fmt.Errorf("cannot read package name, scan error: %v", err) } info.packageName, info.err = packageDirToName(info.dir) info.status = nameLoaded d.Store(info.dir, info) - return info, info.err + return info.packageName, info.err } func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { @@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { return "", nil, err } - info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir) - if info.err == context.Canceled { + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { return info.packageName, info.exports, info.err } // The cache structure wants things to proceed linearly. 
We can skip a diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 000000000..0c0dbb6a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,4 @@ +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +var GetForTest = func(p interface{}) string { return "" } diff --git a/vendor/golang.org/x/tools/internal/span/parse.go b/vendor/golang.org/x/tools/internal/span/parse.go deleted file mode 100644 index b3f268a38..000000000 --- a/vendor/golang.org/x/tools/internal/span/parse.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "strconv" - "strings" - "unicode/utf8" -) - -// Parse returns the location represented by the input. -// All inputs are valid locations, as they can always be a pure filename. -// The returned span will be normalized, and thus if printed may produce a -// different string. -func Parse(input string) Span { - // :0:0#0-0:0#0 - valid := input - var hold, offset int - hadCol := false - suf := rstripSuffix(input) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep == ":" { - valid = suf.remains - hold = suf.num - hadCol = true - suf = rstripSuffix(suf.remains) - } - switch { - case suf.sep == ":": - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{}) - case suf.sep == "-": - // we have a span, fall out of the case to continue - default: - // separator not valid, rewind to either the : or the start - return New(NewURI(valid), NewPoint(hold, 0, offset), Point{}) - } - // only the span form can get here - // at this point we still don't know what the numbers we have mean - // if have not yet seen a : then we might have either a line or a column depending - // on whether start has a column or not - // we build an end point and will fix it later if needed - end := NewPoint(suf.num, hold, offset) - hold, offset = 0, 0 - suf = rstripSuffix(suf.remains) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep != ":" { - // turns out we don't have a span after all, rewind - return New(NewURI(valid), end, Point{}) - } - valid = suf.remains - hold = suf.num - suf = rstripSuffix(suf.remains) - if suf.sep != ":" { - // line#offset only - return New(NewURI(valid), NewPoint(hold, 0, offset), end) - } - // we have a column, so if end only had one number, it is also the column - if !hadCol { - end = NewPoint(suf.num, end.v.Line, end.v.Offset) - } - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end) -} - -type suffix struct { - remains string - sep string - num int -} - -func rstripSuffix(input string) suffix { - if len(input) == 0 { - return suffix{"", "", -1} - } - remains := input - num := -1 - // first see if we have a number at the end - last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) - if last >= 0 && last < len(remains)-1 { - number, err := strconv.ParseInt(remains[last+1:], 10, 64) - if err == nil { - num = int(number) - remains = remains[:last+1] - } - } - // now see if we have a trailing separator - r, w := utf8.DecodeLastRuneInString(remains) - if r != ':' && r != '#' && r == '#' { - return suffix{input, "", -1} - } - remains = remains[:len(remains)-w] - 
return suffix{remains, string(r), num} -} diff --git a/vendor/golang.org/x/tools/internal/span/span.go b/vendor/golang.org/x/tools/internal/span/span.go deleted file mode 100644 index 4d2ad0986..000000000 --- a/vendor/golang.org/x/tools/internal/span/span.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package span contains support for representing with positions and ranges in -// text files. -package span - -import ( - "encoding/json" - "fmt" - "path" -) - -// Span represents a source code range in standardized form. -type Span struct { - v span -} - -// Point represents a single point within a file. -// In general this should only be used as part of a Span, as on its own it -// does not carry enough information. -type Point struct { - v point -} - -type span struct { - URI URI `json:"uri"` - Start point `json:"start"` - End point `json:"end"` -} - -type point struct { - Line int `json:"line"` - Column int `json:"column"` - Offset int `json:"offset"` -} - -// Invalid is a span that reports false from IsValid -var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} - -var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} - -// Converter is the interface to an object that can convert between line:column -// and offset forms for a single file. -type Converter interface { - //ToPosition converts from an offset to a line:column pair. - ToPosition(offset int) (int, int, error) - //ToOffset converts from a line:column pair to an offset. - ToOffset(line, col int) (int, error) -} - -func New(uri URI, start Point, end Point) Span { - s := Span{v: span{URI: uri, Start: start.v, End: end.v}} - s.v.clean() - return s -} - -func NewPoint(line, col, offset int) Point { - p := Point{v: point{Line: line, Column: col, Offset: offset}} - p.v.clean() - return p -} - -func Compare(a, b Span) int { - if r := CompareURI(a.URI(), b.URI()); r != 0 { - return r - } - if r := comparePoint(a.v.Start, b.v.Start); r != 0 { - return r - } - return comparePoint(a.v.End, b.v.End) -} - -func ComparePoint(a, b Point) int { - return comparePoint(a.v, b.v) -} - -func comparePoint(a, b point) int { - if !a.hasPosition() { - if a.Offset < b.Offset { - return -1 - } - if a.Offset > b.Offset { - return 1 - } - return 0 - } - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Column < b.Column { - return -1 - } - if a.Column > b.Column { - return 1 - } - return 0 -} - -func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } -func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } -func (s Span) IsValid() bool { return s.v.Start.isValid() } -func (s Span) IsPoint() bool { return s.v.Start == s.v.End } -func (s Span) URI() URI { return s.v.URI } -func (s Span) Start() Point { return Point{s.v.Start} } -func (s Span) End() Point { return Point{s.v.End} } -func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } -func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } - -func (p Point) HasPosition() bool { return p.v.hasPosition() } -func (p Point) HasOffset() bool { return p.v.hasOffset() } -func (p Point) IsValid() bool { return p.v.isValid() } -func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } -func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } -func (p Point) Line() int { - if !p.v.hasPosition() { - 
panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Line -} -func (p Point) Column() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Column -} -func (p Point) Offset() int { - if !p.v.hasOffset() { - panic(fmt.Errorf("offset not set in %v", p.v)) - } - return p.v.Offset -} - -func (p point) hasPosition() bool { return p.Line > 0 } -func (p point) hasOffset() bool { return p.Offset >= 0 } -func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } -func (p point) isZero() bool { - return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) -} - -func (s *span) clean() { - //this presumes the points are already clean - if !s.End.isValid() || (s.End == point{}) { - s.End = s.Start - } -} - -func (p *point) clean() { - if p.Line < 0 { - p.Line = 0 - } - if p.Column <= 0 { - if p.Line > 0 { - p.Column = 1 - } else { - p.Column = 0 - } - } - if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { - p.Offset = -1 - } -} - -// Format implements fmt.Formatter to print the Location in a standard form. -// The format produced is one that can be read back in using Parse. -func (s Span) Format(f fmt.State, c rune) { - fullForm := f.Flag('+') - preferOffset := f.Flag('#') - // we should always have a uri, simplify if it is file format - //TODO: make sure the end of the uri is unambiguous - uri := string(s.v.URI) - if c == 'f' { - uri = path.Base(uri) - } else if !fullForm { - uri = s.v.URI.Filename() - } - fmt.Fprint(f, uri) - if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { - return - } - // see which bits of start to write - printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) - printLine := s.HasPosition() && (fullForm || !printOffset) - printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) - fmt.Fprint(f, ":") - if printLine { - fmt.Fprintf(f, "%d", s.v.Start.Line) - } - if printColumn { - fmt.Fprintf(f, ":%d", s.v.Start.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.Start.Offset) - } - // start is written, do we need end? 
- if s.IsPoint() { - return - } - // we don't print the line if it did not change - printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) - fmt.Fprint(f, "-") - if printLine { - fmt.Fprintf(f, "%d", s.v.End.Line) - } - if printColumn { - if printLine { - fmt.Fprint(f, ":") - } - fmt.Fprintf(f, "%d", s.v.End.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.End.Offset) - } -} - -func (s Span) WithPosition(c Converter) (Span, error) { - if err := s.update(c, true, false); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithOffset(c Converter) (Span, error) { - if err := s.update(c, false, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithAll(c Converter) (Span, error) { - if err := s.update(c, true, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s *Span) update(c Converter, withPos, withOffset bool) error { - if !s.IsValid() { - return fmt.Errorf("cannot add information to an invalid span") - } - if withPos && !s.HasPosition() { - if err := s.v.Start.updatePosition(c); err != nil { - return err - } - if s.v.End.Offset == s.v.Start.Offset { - s.v.End = s.v.Start - } else if err := s.v.End.updatePosition(c); err != nil { - return err - } - } - if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { - if err := s.v.Start.updateOffset(c); err != nil { - return err - } - if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { - s.v.End.Offset = s.v.Start.Offset - } else if err := s.v.End.updateOffset(c); err != nil { - return err - } - } - return nil -} - -func (p *point) updatePosition(c Converter) error { - line, col, err := c.ToPosition(p.Offset) - if err != nil { - return err - } - p.Line = line - p.Column = col - return nil -} - -func (p *point) updateOffset(c Converter) error { - offset, err := c.ToOffset(p.Line, p.Column) - if err != nil { - return err - } - p.Offset = offset - return nil -} diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go deleted file mode 100644 index 4028eafa7..000000000 --- a/vendor/golang.org/x/tools/internal/span/token.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "go/token" -) - -// Range represents a source code range in token.Pos form. -// It also carries the FileSet that produced the positions, so that it is -// self contained. -type Range struct { - FileSet *token.FileSet - Start token.Pos - End token.Pos - Converter Converter -} - -// TokenConverter is a Converter backed by a token file set and file. -// It uses the file set methods to work out the conversions, which -// makes it fast and does not require the file contents. -type TokenConverter struct { - fset *token.FileSet - file *token.File -} - -// NewRange creates a new Range from a FileSet and two positions. -// To represent a point pass a 0 as the end pos. -func NewRange(fset *token.FileSet, start, end token.Pos) Range { - return Range{ - FileSet: fset, - Start: start, - End: end, - } -} - -// NewTokenConverter returns an implementation of Converter backed by a -// token.File. -func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { - return &TokenConverter{fset: fset, file: f} -} - -// NewContentConverter returns an implementation of Converter for the -// given file content. 
-func NewContentConverter(filename string, content []byte) *TokenConverter { - fset := token.NewFileSet() - f := fset.AddFile(filename, -1, len(content)) - f.SetLinesForContent(content) - return &TokenConverter{fset: fset, file: f} -} - -// IsPoint returns true if the range represents a single point. -func (r Range) IsPoint() bool { - return r.Start == r.End -} - -// Span converts a Range to a Span that represents the Range. -// It will fill in all the members of the Span, calculating the line and column -// information. -func (r Range) Span() (Span, error) { - f := r.FileSet.File(r.Start) - if f == nil { - return Span{}, fmt.Errorf("file not found in FileSet") - } - var s Span - var err error - var startFilename string - startFilename, s.v.Start.Line, s.v.Start.Column, err = position(f, r.Start) - if err != nil { - return Span{}, err - } - s.v.URI = FileURI(startFilename) - if r.End.IsValid() { - var endFilename string - endFilename, s.v.End.Line, s.v.End.Column, err = position(f, r.End) - if err != nil { - return Span{}, err - } - // In the presence of line directives, a single File can have sections from - // multiple file names. - if endFilename != startFilename { - return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) - } - } - s.v.Start.clean() - s.v.End.clean() - s.v.clean() - if r.Converter != nil { - return s.WithOffset(r.Converter) - } - if startFilename != f.Name() { - return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", f.Name(), startFilename) - } - return s.WithOffset(NewTokenConverter(r.FileSet, f)) -} - -func position(f *token.File, pos token.Pos) (string, int, int, error) { - off, err := offset(f, pos) - if err != nil { - return "", 0, 0, err - } - return positionFromOffset(f, off) -} - -func positionFromOffset(f *token.File, offset int) (string, int, int, error) { - if offset > f.Size() { - return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size()) - } - pos := f.Pos(offset) - p := f.Position(pos) - if offset == f.Size() { - return p.Filename, p.Line + 1, 1, nil - } - return p.Filename, p.Line, p.Column, nil -} - -// offset is a copy of the Offset function in go/token, but with the adjustment -// that it does not panic on invalid positions. -func offset(f *token.File, pos token.Pos) (int, error) { - if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { - return 0, fmt.Errorf("invalid pos") - } - return int(pos) - f.Base(), nil -} - -// Range converts a Span to a Range that represents the Span for the supplied -// File. -func (s Span) Range(converter *TokenConverter) (Range, error) { - s, err := s.WithOffset(converter) - if err != nil { - return Range{}, err - } - // go/token will panic if the offset is larger than the file's size, - // so check here to avoid panicking. 
- if s.Start().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) - } - if s.End().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) - } - return Range{ - FileSet: converter.fset, - Start: converter.file.Pos(s.Start().Offset()), - End: converter.file.Pos(s.End().Offset()), - Converter: converter, - }, nil -} - -func (l *TokenConverter) ToPosition(offset int) (int, int, error) { - _, line, col, err := positionFromOffset(l.file, offset) - return line, col, err -} - -func (l *TokenConverter) ToOffset(line, col int) (int, error) { - if line < 0 { - return -1, fmt.Errorf("line is not valid") - } - lineMax := l.file.LineCount() + 1 - if line > lineMax { - return -1, fmt.Errorf("line is beyond end of file %v", lineMax) - } else if line == lineMax { - if col > 1 { - return -1, fmt.Errorf("column is beyond end of file") - } - // at the end of the file, allowing for a trailing eol - return l.file.Size(), nil - } - pos := lineStart(l.file, line) - if !pos.IsValid() { - return -1, fmt.Errorf("line is not in file") - } - // we assume that column is in bytes here, and that the first byte of a - // line is at column 1 - pos += token.Pos(col - 1) - return offset(l.file, pos) -} diff --git a/vendor/golang.org/x/tools/internal/span/token111.go b/vendor/golang.org/x/tools/internal/span/token111.go deleted file mode 100644 index bf7a5406b..000000000 --- a/vendor/golang.org/x/tools/internal/span/token111.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.12 - -package span - -import ( - "go/token" -) - -// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go -// versions <= 1.11, we borrow logic from the analysisutil package. -// TODO(rstambler): Delete this file when we no longer support Go 1.11. -func lineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} diff --git a/vendor/golang.org/x/tools/internal/span/token112.go b/vendor/golang.org/x/tools/internal/span/token112.go deleted file mode 100644 index 017aec9c1..000000000 --- a/vendor/golang.org/x/tools/internal/span/token112.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.12 - -package span - -import ( - "go/token" -) - -// TODO(rstambler): Delete this file when we no longer support Go 1.11. -func lineStart(f *token.File, line int) token.Pos { - return f.LineStart(line) -} diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go deleted file mode 100644 index e05a9e6ef..000000000 --- a/vendor/golang.org/x/tools/internal/span/uri.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "unicode" -) - -const fileScheme = "file" - -// URI represents the full URI for a file. -type URI string - -// Filename returns the file path for the given URI. -// It is an error to call this on a URI that is not a valid filename. -func (uri URI) Filename() string { - filename, err := filename(uri) - if err != nil { - panic(err) - } - return filepath.FromSlash(filename) -} - -func filename(uri URI) (string, error) { - if uri == "" { - return "", nil - } - u, err := url.ParseRequestURI(string(uri)) - if err != nil { - return "", err - } - if u.Scheme != fileScheme { - return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) - } - if isWindowsDriveURI(u.Path) { - u.Path = u.Path[1:] - } - return u.Path, nil -} - -// NewURI returns a span URI for the string. -// It will attempt to detect if the string is a file path or uri. -func NewURI(s string) URI { - if u, err := url.PathUnescape(s); err == nil { - s = u - } - if strings.HasPrefix(s, fileScheme+"://") { - return URI(s) - } - return FileURI(s) -} - -func CompareURI(a, b URI) int { - if equalURI(a, b) { - return 0 - } - if a < b { - return -1 - } - return 1 -} - -func equalURI(a, b URI) bool { - if a == b { - return true - } - // If we have the same URI basename, we may still have the same file URIs. - if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { - return false - } - fa, err := filename(a) - if err != nil { - return false - } - fb, err := filename(b) - if err != nil { - return false - } - // Stat the files to check if they are equal. - infoa, err := os.Stat(filepath.FromSlash(fa)) - if err != nil { - return false - } - infob, err := os.Stat(filepath.FromSlash(fb)) - if err != nil { - return false - } - return os.SameFile(infoa, infob) -} - -// FileURI returns a span URI for the supplied file path. -// It will always have the file scheme. -func FileURI(path string) URI { - if path == "" { - return "" - } - // Handle standard library paths that contain the literal "$GOROOT". - // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. - const prefix = "$GOROOT" - if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { - suffix := path[len(prefix):] - path = runtime.GOROOT() + suffix - } - if !isWindowsDrivePath(path) { - if abs, err := filepath.Abs(path); err == nil { - path = abs - } - } - // Check the file path again, in case it became absolute. - if isWindowsDrivePath(path) { - path = "/" + path - } - path = filepath.ToSlash(path) - u := url.URL{ - Scheme: fileScheme, - Path: path, - } - uri := u.String() - if unescaped, err := url.PathUnescape(uri); err == nil { - uri = unescaped - } - return URI(uri) -} - -// isWindowsDrivePath returns true if the file path is of the form used by -// Windows. We check if the path begins with a drive letter, followed by a ":". -func isWindowsDrivePath(path string) bool { - if len(path) < 4 { - return false - } - return unicode.IsLetter(rune(path[0])) && path[1] == ':' -} - -// isWindowsDriveURI returns true if the file URI is of the format used by -// Windows URIs. The url.Parse package does not specially handle Windows paths -// (see https://golang.org/issue/6027). We check if the URI path has -// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". 
-func isWindowsDriveURI(uri string) bool { - if len(uri) < 4 { - return false - } - return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' -} diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go deleted file mode 100644 index 561b3fa50..000000000 --- a/vendor/golang.org/x/tools/internal/span/utf16.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "unicode/utf16" - "unicode/utf8" -) - -// ToUTF16Column calculates the utf16 column expressed by the point given the -// supplied file contents. -// This is used to convert from the native (always in bytes) column -// representation and the utf16 counts used by some editors. -func ToUTF16Column(p Point, content []byte) (int, error) { - if content == nil { - return -1, fmt.Errorf("ToUTF16Column: missing content") - } - if !p.HasPosition() { - return -1, fmt.Errorf("ToUTF16Column: point is missing position") - } - if !p.HasOffset() { - return -1, fmt.Errorf("ToUTF16Column: point is missing offset") - } - offset := p.Offset() // 0-based - colZero := p.Column() - 1 // 0-based - if colZero == 0 { - // 0-based column 0, so it must be chr 1 - return 1, nil - } else if colZero < 0 { - return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) - } - // work out the offset at the start of the line using the column - lineOffset := offset - colZero - if lineOffset < 0 || offset > len(content) { - return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) - } - // Use the offset to pick out the line start. - // This cannot panic: offset > len(content) and lineOffset < offset. - start := content[lineOffset:] - - // Now, truncate down to the supplied column. - start = start[:colZero] - - // and count the number of utf16 characters - // in theory we could do this by hand more efficiently... - return len(utf16.Encode([]rune(string(start)))) + 1, nil -} - -// FromUTF16Column advances the point by the utf16 character offset given the -// supplied line contents. -// This is used to convert from the utf16 counts used by some editors to the -// native (always in bytes) column representation. -func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { - if !p.HasOffset() { - return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") - } - // if chr is 1 then no adjustment needed - if chr <= 1 { - return p, nil - } - if p.Offset() >= len(content) { - return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) - } - remains := content[p.Offset():] - // scan forward the specified number of characters - for count := 1; count < chr; count++ { - if len(remains) <= 0 { - return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") - } - r, w := utf8.DecodeRune(remains) - if r == '\n' { - // Per the LSP spec: - // - // > If the character value is greater than the line length it - // > defaults back to the line length. 
- break - } - remains = remains[w:] - if r >= 0x10000 { - // a two point rune - count++ - // if we finished in a two point rune, do not advance past the first - if count >= chr { - break - } - } - p.v.Column += w - p.v.Offset += w - } - return p, nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8439a21a2..b806bf2db 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -738,7 +738,7 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20191203051722-db047d72ee39 +# golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa golang.org/x/tools/cmd/goimports golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/inspect @@ -751,12 +751,13 @@ golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil +golang.org/x/tools/imports golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports golang.org/x/tools/internal/module +golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/semver -golang.org/x/tools/internal/span # google.golang.org/api v0.14.0 google.golang.org/api/compute/v1 google.golang.org/api/googleapi From 38fe16e01f8c9bad6ccd395f7c66ac1cea7f2172 Mon Sep 17 00:00:00 2001 From: Luba Grinkevich Date: Mon, 10 Feb 2020 18:36:19 +0300 Subject: [PATCH 21/61] Add service account ID to config #8716 --- builder/yandex/config.go | 2 ++ builder/yandex/step_create_instance.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/builder/yandex/config.go b/builder/yandex/config.go index 5d3952a6a..cbc07bdfe 100644 --- a/builder/yandex/config.go +++ b/builder/yandex/config.go @@ -41,6 +41,8 @@ type Config struct { // is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable // YC_SERVICE_ACCOUNT_KEY_FILE. ServiceAccountKeyFile string `mapstructure:"service_account_key_file" required:"false"` + // Service account identifier to assign to instance + ServiceAccountID string `mapstructure:"service_account_id" required:"false"` // OAuth token to use to authenticate to Yandex.Cloud. Alternatively you may set // value by environment variable YC_TOKEN. 
Token string `mapstructure:"token" required:"true"` diff --git a/builder/yandex/step_create_instance.go b/builder/yandex/step_create_instance.go index b13cbca1e..2c4522d90 100644 --- a/builder/yandex/step_create_instance.go +++ b/builder/yandex/step_create_instance.go @@ -205,6 +205,10 @@ runcmd: }, } + if config.ServiceAccountID != "" { + req.ServiceAccountId = config.ServiceAccountID + } + if config.UseIPv6 { req.NetworkInterfaceSpecs[0].PrimaryV6AddressSpec = &compute.PrimaryAddressSpec{} } From a067b23e6d2f0facdbfd9f621b7b73275954bca0 Mon Sep 17 00:00:00 2001 From: Luba Grinkevich Date: Mon, 10 Feb 2020 18:51:29 +0300 Subject: [PATCH 22/61] Execute `make generate` #8716 --- builder/yandex/config.hcl2spec.go | 2 ++ .../source/partials/builder/yandex/_Config-not-required.html.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/builder/yandex/config.hcl2spec.go b/builder/yandex/config.hcl2spec.go index 1fb0db29b..a3c10418b 100644 --- a/builder/yandex/config.hcl2spec.go +++ b/builder/yandex/config.hcl2spec.go @@ -59,6 +59,7 @@ type FlatConfig struct { Endpoint *string `mapstructure:"endpoint" required:"false" cty:"endpoint"` FolderID *string `mapstructure:"folder_id" required:"true" cty:"folder_id"` ServiceAccountKeyFile *string `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file"` + ServiceAccountID *string `mapstructure:"service_account_id" required:"false" cty:"service_account_id"` Token *string `mapstructure:"token" required:"true" cty:"token"` DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name"` DiskSizeGb *int `mapstructure:"disk_size_gb" required:"false" cty:"disk_size_gb"` @@ -153,6 +154,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "endpoint": &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false}, "folder_id": &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false}, "service_account_key_file": &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false}, + "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false}, "token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false}, "disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false}, "disk_size_gb": &hcldec.AttrSpec{Name: "disk_size_gb", Type: cty.Number, Required: false}, diff --git a/website/source/partials/builder/yandex/_Config-not-required.html.md b/website/source/partials/builder/yandex/_Config-not-required.html.md index 8a0674934..36daf5a77 100644 --- a/website/source/partials/builder/yandex/_Config-not-required.html.md +++ b/website/source/partials/builder/yandex/_Config-not-required.html.md @@ -6,6 +6,8 @@ is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable YC_SERVICE_ACCOUNT_KEY_FILE. +- `service_account_id` (string) - Service account identifier to assign to instance + - `disk_name` (string) - The name of the disk, if unset the instance name will be used. 
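
The two Yandex patches above add an optional `service_account_id` setting and copy it onto the instance-creation request only when it is non-empty. Below is a minimal, self-contained sketch of that pattern under simplifying assumptions — `Config` and `CreateInstanceRequest` here are invented stand-ins, not the real Packer builder config or Yandex.Cloud SDK types.

package main

import (
	"encoding/json"
	"fmt"
)

// Config is a simplified stand-in for the builder configuration; only the
// optional setting discussed above is shown.
type Config struct {
	ServiceAccountID string `mapstructure:"service_account_id" json:"service_account_id"`
}

// CreateInstanceRequest is a simplified stand-in for the compute API request
// assembled by the create-instance step.
type CreateInstanceRequest struct {
	Name             string `json:"name"`
	ServiceAccountId string `json:"service_account_id,omitempty"`
}

// buildRequest applies the optional setting only when the user provided it,
// mirroring the non-empty guard used in the patch above.
func buildRequest(name string, cfg Config) CreateInstanceRequest {
	req := CreateInstanceRequest{Name: name}
	if cfg.ServiceAccountID != "" {
		req.ServiceAccountId = cfg.ServiceAccountID
	}
	return req
}

func main() {
	withID := buildRequest("packer-build", Config{ServiceAccountID: "example-service-account-id"})
	withoutID := buildRequest("packer-build", Config{})

	a, _ := json.Marshal(withID)
	b, _ := json.Marshal(withoutID)
	fmt.Println(string(a)) // {"name":"packer-build","service_account_id":"example-service-account-id"}
	fmt.Println(string(b)) // {"name":"packer-build"} — the optional field is omitted entirely
}

In this sketch the unset option never reaches the request at all; the guard-before-assign shape is the same one the patch uses, though the real SDK types and field names may differ.
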
From cc3d941853c47fc099ba6bd3ea133d8e818cb67a Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Mon, 10 Feb 2020 17:54:11 +0100 Subject: [PATCH 23/61] iso checksumming: use checksum or checksum url if set to handle all cases fix #8322 --- common/iso_config.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/common/iso_config.go b/common/iso_config.go index 0836035c8..338bafaf5 100644 --- a/common/iso_config.go +++ b/common/iso_config.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "errors" "fmt" - "net/url" "strings" getter "github.com/hashicorp/go-getter/v2" @@ -160,14 +159,13 @@ func (c *ISOConfig) Prepare(*interpolate.Context) (warnings []string, errs []err errs = append(errs, fmt.Errorf("A checksum must be specified")) } if c.ISOChecksumType == "file" { - u, err := url.Parse(c.ISOUrls[0]) - if err != nil { - errs = append(errs, fmt.Errorf("error parsing URL <%s>: %s", - c.ISOUrls[0], err)) + url := c.ISOChecksum + if c.ISOChecksumURL != "" { + url = c.ISOChecksumURL } - cksum, err := getter.DefaultClient.ChecksumFromFile(context.TODO(), c.ISOChecksumURL, u.Path) - if cksum == nil || err != nil { - errs = append(errs, fmt.Errorf("Couldn't extract checksum from checksum file")) + cksum, err := getter.DefaultClient.ChecksumFromFile(context.TODO(), url, c.ISOUrls[0]) + if err != nil { + errs = append(errs, fmt.Errorf("Couldn't extract checksum from checksum file: %v", err)) } else { c.ISOChecksumType = cksum.Type c.ISOChecksum = hex.EncodeToString(cksum.Value) From 1a78821ca0a2e092335123f51332af73e818260e Mon Sep 17 00:00:00 2001 From: nicolelyn Date: Mon, 10 Feb 2020 14:45:58 -0500 Subject: [PATCH 24/61] website: bump middleman version --- website/Gemfile | 2 +- website/Gemfile.lock | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/website/Gemfile b/website/Gemfile index 928dcea37..f4cbda7f2 100644 --- a/website/Gemfile +++ b/website/Gemfile @@ -1,3 +1,3 @@ source "https://rubygems.org" -gem "middleman-hashicorp", "0.3.41" +gem "middleman-hashicorp", "0.3.43" diff --git a/website/Gemfile.lock b/website/Gemfile.lock index efd35b165..bb39983ca 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -78,7 +78,7 @@ GEM rack (>= 1.4.5, < 2.0) thor (>= 0.15.2, < 2.0) tilt (~> 1.4.1, < 2.0) - middleman-hashicorp (0.3.41) + middleman-hashicorp (0.3.43) bootstrap-sass (~> 3.3) builder (~> 3.2) middleman (~> 3.4) @@ -155,7 +155,7 @@ PLATFORMS ruby DEPENDENCIES - middleman-hashicorp (= 0.3.41) + middleman-hashicorp (= 0.3.43) BUNDLED WITH 1.17.1 From 1f4c9170c64c8a9c169136dbc5a79195425e7e2e Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Mon, 10 Feb 2020 17:06:07 -0500 Subject: [PATCH 25/61] Update CHANGELOG.md --- CHANGELOG.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 222a32ffb..8d57b7916 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,25 @@ ## 1.5.2 (Upcoming) -** New Builder ** The vsphere-iso builder, previously maintained by JetBrains, +**New Builder** The vsphere-iso builder, previously maintained by JetBrains, has been merged with the Packer core. It will be officially supported by the Packer team at HashiCorp moving forward. [GH-8480] ### IMPROVEMENTS: * builder/alicloud: Add AlicloudProfile option. 
[GH-8560] * builder/amazon: Add source AMI owner ID/name to template engines [GH-8550] +* builder/amazon: Update instance waiters to use global waiter settings set by + `AWS_POLL_DELAY_SECONDS` and `AWS_TIMEOUT_SECONDS` [GH-8699] +* builder/azure: Allow users to use custom key vault for storing Windows + certificates [GH-8704] * builder/azure: Set expiry for image versions in SIG [GH-8561] * builder/proxmox: Add option to upload the boot ISO rather than pointing out a previously manually uploaded one. [GH-8624] * builder/vagrant: Fix a crash in the Vagrant driver [GH-8607] +* builder/yandex: Add service account ID to config [GH-8717] +* core: Add `PACKER_PLUGIN_PATH` to list of supported paths for plugin + discovery [GH-8616] * core: clean up messy log line in plugin execution. [GH-8542] +* core: Ensure `PACKER_HTTP_ADDR` is always set for any builder that provides a + HTTP server for file transfer [GH-8654] * core: Fix loading external plugins defined in PACKER_CONFIG [GH-8582] * core: Log name of postprocessor running to disambiguate long chains of post- processors. [GH-8613] @@ -26,13 +35,19 @@ Packer team at HashiCorp moving forward. [GH-8480] * builder/amazon: Allow AWS builder pre-validation to pass when subnet filters are present [GH-8622] * builder/azure: Fix bug where deployments were not being cleaned up: [GH-8496] +* builder/azure: Fix issue where WinRMPassword was being left unset [GH-8670] +* builder/lxd: Fix file uploading issue when using the file provisioner + [GH-8636] * builder/null: Fix crash when configuring builder using HCL2. [GH-8612] * builder/osc: Fix ssh host detection in Public Cloud and Nets [GH-8414] * builder/vagrant: Fix bug with reading key from a path with spaces [GH-8605] * builder/virtualbox-ovf: Remove config dependency from StepImport [GH-8509] * builder/virtualbox-vm: use config as a non pointer to avoid a panic [GH-8576] +* communicator/winrm: Fix issue where the value of `winrm_host` was being + ignored for some builders [GH-8615] * core: Fix crash when build.sources is set to an invalid name [GH-8569] * core: Fix error loading .packerconfig [GH-8623] +* core: Fix loading local ISO files when using `iso_target_path` [GH-8689] * core: Fix loading of external plugins. GH-8543] * post-processor/docker-tag: Fix regression if no tags were specified. [GH-8593] From 8b482933cffda27124b5351685fc253025bfb3f5 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 5 Feb 2020 17:15:40 -0500 Subject: [PATCH 26/61] docs/azure-arm: Refactor docs to clear up required options This changes separates the required configurations options needed when using Azure Marketplace images vs Custom user built Images. --- .../docs/builders/azure-arm.html.md.erb | 79 +++++++++++-------- 1 file changed, 47 insertions(+), 32 deletions(-) diff --git a/website/source/docs/builders/azure-arm.html.md.erb b/website/source/docs/builders/azure-arm.html.md.erb index 77d01a294..1c9bd3777 100644 --- a/website/source/docs/builders/azure-arm.html.md.erb +++ b/website/source/docs/builders/azure-arm.html.md.erb @@ -9,18 +9,12 @@ sidebar_current: 'docs-builders-azure-arm' Type: `azure-arm` -Packer supports building VHDs in [Azure Resource +Packer supports building VHDs and Managed Images in [Azure Resource Manager](https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/). Azure provides new users a [$200 credit for the first 30 days](https://azure.microsoft.com/en-us/free/); after which you will incur costs for VMs built and stored using Packer. 
-Unlike most Packer builders, the artifact produced by the ARM builder is a VHD -(virtual hard disk), not a full virtual machine image. This means you will need -to [perform some additional -steps](https://github.com/Azure/packer-azure/issues/201) in order to launch a -VM from your build artifact. - Azure uses a combination of OAuth and Active Directory to authorize requests to the ARM API. Learn how to [authorize access to ARM](/docs/builders/azure.html#authentication-for-azure). @@ -28,20 +22,22 @@ ARM](/docs/builders/azure.html#authentication-for-azure). The documentation below references command output from the [Azure CLI](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/). -## Configuration Reference +## Authentication: +There are three options for Authenticationg to Azure, two of which require certain +configuration options in order to properly build an Azure Arm Image. -The following configuration options are available for building Azure images. In -addition to the options listed here, a -[communicator](/docs/templates/communicator.html) can be configured for this -builder. +### Managed Identity -### Required options for authentication: If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure.html#azure-managed-identity) -you don't need to specify any additional configuration options. -If you would like to use interactive user authentication, you should specify -`subscription_id` only. Packer will use cached credentials or redirect you -to a website to log in. -If you want to use a [service principal](/docs/builders/azure.html#azure-active-directory-service-principal) +you don't need to specify any additional configuration options. As Packer will attempt to use the Managed Identity +and subscription of the VM that Packer is running on. + +### Interactive User Authentication +To use interactive user authentication, you should specify `subscription_id` only. +Packer will use cached credentials or redirect you to a website to log in. + +### Service Principal +To use a [service principal](/docs/builders/azure.html#azure-active-directory-service-principal) you should specify `subscription_id`, `client_id` and one of `client_secret`, `client_cert_path` or `client_jwt`. @@ -64,16 +60,34 @@ you should specify `subscription_id`, `client_id` and one of `client_secret`, Directory docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-certificate-credentials) for more information. -### Required: +<%= partial "partials/builder/azure/common/client/_Config" %> -<%= partial "partials/builder/azure/arm/Config-required" %> +## Configuration Reference + +The following configuration options are available for building Azure images. In +addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this +builder. -#### VHD or Managed Image The Azure builder can create either a VHD, or a managed image. If you are creating a VHD, you **must** start with a VHD. Likewise, if you want to create -a managed image you **must** start with a managed image. When creating a VHD -the following options are required. +a managed image you **must** start with a managed image. 
Images can be obtained from +the Azure Marketplace or from within a users' subscription - see [Using Custom Images](#using-custom-images) + +### Using Azure Marketplace Images + +<%= partial "partials/builder/azure/arm/Config-required" %> + +### Using Custom Images + +- `image_url` (string) - Specify a custom VHD to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. + +- `custom_managed_image_resource_group_name` (string) - Specify the source managed image's resource group used to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. If this value is set, the value custom_managed_image_name must also be set. See documentation to learn more about managed images. + +- `custom_managed_image_name` (string) - Specify the source managed image's name to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. If this value is set, the value custom_managed_image_resource_group_name must also be set. See documentation to learn more about managed images. + + +When creating a VHD the following options are required: - `capture_container_name` (string) - Destination container name. Essentially the "directory" where your VHD will be organized in Azure. The captured @@ -89,7 +103,7 @@ the following options are required. - `storage_account` (string) - Storage account under which the final artifact will be stored. -When creating a managed image the following options are required. +When creating a Managed Imaged the following options are required: - `managed_image_name` (string) - Specify the managed image name where the result of the Packer build will be saved. The image name must not exist @@ -108,7 +122,9 @@ When creating a managed image the following options are required. Managed images can optionally be published to [Shared Image Gallery](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/) as Shared Gallery Image version. Shared Image Gallery **only** works with Managed Images. **A VHD cannot be published to -a Shared Image Gallery**. When publishing to a Shared Image Gallery the following options are required. +a Shared Image Gallery**. + +When publishing to a Shared Image Gallery the following options are required. - `shared_image_gallery_destination` (object) The name of the Shared Image Gallery under which the managed image will be published as Shared Gallery Image version. @@ -124,6 +140,12 @@ Following is an example. "managed_image_name": "TargetImageName", "managed_image_resource_group_name": "TargetResourceGroup" + +### Optional: + +<%= partial "partials/builder/azure/arm/Config-not-required" %> +<%= partial "partials/builder/azure/common/client/_Config-not-required" %> + #### Resource Group Usage The Azure builder can either provision resources into a new resource group that @@ -155,13 +177,6 @@ To use an existing resource group you **must** provide: Providing `temp_resource_group_name` or `location` in combination with `build_resource_group_name` is not allowed. -<%= partial "partials/builder/azure/common/client/_Config" %> - -### Optional: - -<%= partial "partials/builder/azure/arm/Config-not-required" %> -<%= partial "partials/builder/azure/common/client/_Config-not-required" %> - ## Basic Example Here is a basic example for Azure. 
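
The docs patch above (PATCH 26/61) reorganizes Azure ARM authentication around three paths: managed identity (no extra options), interactive user authentication (`subscription_id` only), and service principal (`subscription_id`, `client_id`, plus one of `client_secret`, `client_cert_path`, or `client_jwt`). The sketch below only restates those documented rules as code; it is not Packer's actual client-config validation, `azureAuthOptions` is an invented type, and treating the three paths as strictly mutually exclusive is an assumption drawn from the prose above.

package main

import (
	"errors"
	"fmt"
)

// azureAuthOptions is an invented stand-in for the authentication-related
// builder options named in the documentation above; it is not Packer's real
// client configuration struct.
type azureAuthOptions struct {
	SubscriptionID string
	ClientID       string
	ClientSecret   string
	ClientCertPath string
	ClientJWT      string
}

// authMethod maps a combination of options to the documented path:
// nothing set -> managed identity; subscription_id only -> interactive user
// authentication; subscription_id + client_id + exactly one secret ->
// service principal. Any other combination is rejected.
func authMethod(o azureAuthOptions) (string, error) {
	secrets := 0
	for _, s := range []string{o.ClientSecret, o.ClientCertPath, o.ClientJWT} {
		if s != "" {
			secrets++
		}
	}
	switch {
	case o.SubscriptionID == "" && o.ClientID == "" && secrets == 0:
		return "managed identity", nil
	case o.SubscriptionID != "" && o.ClientID == "" && secrets == 0:
		return "interactive user authentication", nil
	case o.SubscriptionID != "" && o.ClientID != "" && secrets == 1:
		return "service principal", nil
	default:
		return "", errors.New("specify subscription_id, client_id and exactly one of client_secret, client_cert_path or client_jwt")
	}
}

func main() {
	method, err := authMethod(azureAuthOptions{
		SubscriptionID: "00000000-0000-0000-0000-000000000000",
	})
	fmt.Println(method, err) // interactive user authentication <nil>
}
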
From c99dc565440438e7c4ceac8fc0897e41131c4702 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Thu, 6 Feb 2020 17:00:00 -0500 Subject: [PATCH 27/61] Update Azure ARM documentation * Change generated documentation to indicate what image selection options are required, and when. * Refactor the structure of the documentation to highlight required options for different types of builds --- builder/azure/arm/config.go | 32 ++-- builder/azure/arm/config.hcl2spec.go | 158 +++++++++--------- .../docs/builders/azure-arm.html.md.erb | 58 +++---- .../azure/arm/_Config-not-required.html.md | 17 -- .../azure/arm/_Config-required.html.md | 22 ++- 5 files changed, 137 insertions(+), 150 deletions(-) diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go index 3b701eae7..5c99e1569 100644 --- a/builder/azure/arm/config.go +++ b/builder/azure/arm/config.go @@ -147,20 +147,20 @@ type Config struct { // If set to true, Virtual Machines deployed from the latest version of the // Image Definition won't use this Image Version. SharedGalleryImageVersionExcludeFromLatest bool `mapstructure:"shared_gallery_image_version_exclude_from_latest" required:"false"` - // PublisherName for your base image. See + // Name of the publisher to use for your base image (Azure Marketplace Images only). See // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) // for details. // // CLI example `az vm image list-publishers --location westus` ImagePublisher string `mapstructure:"image_publisher" required:"true"` - // Offer for your base image. See + // Name of the publisher's offer to use for your base image (Azure Marketplace Images only). See // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) // for details. // // CLI example // `az vm image list-offers --location westus --publisher Canonical` ImageOffer string `mapstructure:"image_offer" required:"true"` - // SKU for your base image. See + // SKU of the image offer to use for your base image (Azure Marketplace Images only). See // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) // for details. // @@ -176,24 +176,24 @@ type Config struct { // CLI example // `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all` ImageVersion string `mapstructure:"image_version" required:"false"` - // Specify a custom VHD to use. If this value is set, do + // URL to a custom VHD to use for your base image. If this value is set, do + // not set image_publisher, image_offer, image_sku, or image_version. + ImageUrl string `mapstructure:"image_url" required:"true"` + // Name of a custom managed image to use for your base image. If this value is set, do // not set image_publisher, image_offer, image_sku, or image_version. - ImageUrl string `mapstructure:"image_url" required:"false"` - // Specify the source managed image's resource group used to use. If this - // value is set, do not set image\_publisher, image\_offer, image\_sku, or - // image\_version. If this value is set, the value - // `custom_managed_image_name` must also be set. See - // [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) - // to learn more about managed images. - CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"false"` - // Specify the source managed image's name to use. 
If this value is set, do - // not set image\_publisher, image\_offer, image\_sku, or image\_version. // If this value is set, the value // `custom_managed_image_resource_group_name` must also be set. See // [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) // to learn more about managed images. - CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"false"` - customManagedImageID string + CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"true"` + + // Name of a custom managed image's resource group to use for your base image. If this + // value is set, image_publisher, image_offer, image_sku, or image_version. + // `custom_managed_image_name` must also be set. See + // [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) + // to learn more about managed images. + CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"true"` + customManagedImageID string Location string `mapstructure:"location"` // Size of the VM used for building. This can be changed when you deploy a diff --git a/builder/azure/arm/config.hcl2spec.go b/builder/azure/arm/config.hcl2spec.go index c3fbc8036..0224a90ee 100644 --- a/builder/azure/arm/config.hcl2spec.go +++ b/builder/azure/arm/config.hcl2spec.go @@ -36,9 +36,9 @@ type FlatConfig struct { ImageOffer *string `mapstructure:"image_offer" required:"true" cty:"image_offer"` ImageSku *string `mapstructure:"image_sku" required:"true" cty:"image_sku"` ImageVersion *string `mapstructure:"image_version" required:"false" cty:"image_version"` - ImageUrl *string `mapstructure:"image_url" required:"false" cty:"image_url"` - CustomManagedImageResourceGroupName *string `mapstructure:"custom_managed_image_resource_group_name" required:"false" cty:"custom_managed_image_resource_group_name"` - CustomManagedImageName *string `mapstructure:"custom_managed_image_name" required:"false" cty:"custom_managed_image_name"` + ImageUrl *string `mapstructure:"image_url" required:"true" cty:"image_url"` + CustomManagedImageName *string `mapstructure:"custom_managed_image_name" required:"true" cty:"custom_managed_image_name"` + CustomManagedImageResourceGroupName *string `mapstructure:"custom_managed_image_resource_group_name" required:"true" cty:"custom_managed_image_resource_group_name"` Location *string `mapstructure:"location" cty:"location"` VMSize *string `mapstructure:"vm_size" required:"false" cty:"vm_size"` ManagedImageResourceGroupName *string `mapstructure:"managed_image_resource_group_name" cty:"managed_image_resource_group_name"` @@ -146,83 +146,83 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "shared_gallery_image_version_end_of_life_date": &hcldec.AttrSpec{Name: "shared_gallery_image_version_end_of_life_date", Type: cty.String, Required: false}, "shared_image_gallery_replica_count": &hcldec.AttrSpec{Name: "shared_image_gallery_replica_count", Type: cty.Number, Required: false}, "shared_gallery_image_version_exclude_from_latest": &hcldec.AttrSpec{Name: "shared_gallery_image_version_exclude_from_latest", Type: cty.Bool, Required: false}, - "image_publisher": &hcldec.AttrSpec{Name: "image_publisher", Type: cty.String, Required: false}, - "image_offer": &hcldec.AttrSpec{Name: "image_offer", Type: cty.String, Required: false}, - "image_sku": &hcldec.AttrSpec{Name: "image_sku", Type: cty.String, Required: false}, - "image_version": &hcldec.AttrSpec{Name: 
"image_version", Type: cty.String, Required: false}, - "image_url": &hcldec.AttrSpec{Name: "image_url", Type: cty.String, Required: false}, + "image_publisher": &hcldec.AttrSpec{Name: "image_publisher", Type: cty.String, Required: false}, + "image_offer": &hcldec.AttrSpec{Name: "image_offer", Type: cty.String, Required: false}, + "image_sku": &hcldec.AttrSpec{Name: "image_sku", Type: cty.String, Required: false}, + "image_version": &hcldec.AttrSpec{Name: "image_version", Type: cty.String, Required: false}, + "image_url": &hcldec.AttrSpec{Name: "image_url", Type: cty.String, Required: false}, + "custom_managed_image_name": &hcldec.AttrSpec{Name: "custom_managed_image_name", Type: cty.String, Required: false}, "custom_managed_image_resource_group_name": &hcldec.AttrSpec{Name: "custom_managed_image_resource_group_name", Type: cty.String, Required: false}, - "custom_managed_image_name": &hcldec.AttrSpec{Name: "custom_managed_image_name", Type: cty.String, Required: false}, - "location": &hcldec.AttrSpec{Name: "location", Type: cty.String, Required: false}, - "vm_size": &hcldec.AttrSpec{Name: "vm_size", Type: cty.String, Required: false}, - "managed_image_resource_group_name": &hcldec.AttrSpec{Name: "managed_image_resource_group_name", Type: cty.String, Required: false}, - "managed_image_name": &hcldec.AttrSpec{Name: "managed_image_name", Type: cty.String, Required: false}, - "managed_image_storage_account_type": &hcldec.AttrSpec{Name: "managed_image_storage_account_type", Type: cty.String, Required: false}, - "managed_image_os_disk_snapshot_name": &hcldec.AttrSpec{Name: "managed_image_os_disk_snapshot_name", Type: cty.String, Required: false}, - "managed_image_data_disk_snapshot_prefix": &hcldec.AttrSpec{Name: "managed_image_data_disk_snapshot_prefix", Type: cty.String, Required: false}, - "managed_image_zone_resilient": &hcldec.AttrSpec{Name: "managed_image_zone_resilient", Type: cty.Bool, Required: false}, - "azure_tags": &hcldec.BlockAttrsSpec{TypeName: "azure_tags", ElementType: cty.String, Required: false}, - "resource_group_name": &hcldec.AttrSpec{Name: "resource_group_name", Type: cty.String, Required: false}, - "storage_account": &hcldec.AttrSpec{Name: "storage_account", Type: cty.String, Required: false}, - "temp_compute_name": &hcldec.AttrSpec{Name: "temp_compute_name", Type: cty.String, Required: false}, - "temp_resource_group_name": &hcldec.AttrSpec{Name: "temp_resource_group_name", Type: cty.String, Required: false}, - "build_resource_group_name": &hcldec.AttrSpec{Name: "build_resource_group_name", Type: cty.String, Required: false}, - "build_key_vault_name": &hcldec.AttrSpec{Name: "build_key_vault_name", Type: cty.String, Required: false}, - "private_virtual_network_with_public_ip": &hcldec.AttrSpec{Name: "private_virtual_network_with_public_ip", Type: cty.Bool, Required: false}, - "virtual_network_name": &hcldec.AttrSpec{Name: "virtual_network_name", Type: cty.String, Required: false}, - "virtual_network_subnet_name": &hcldec.AttrSpec{Name: "virtual_network_subnet_name", Type: cty.String, Required: false}, - "virtual_network_resource_group_name": &hcldec.AttrSpec{Name: "virtual_network_resource_group_name", Type: cty.String, Required: false}, - "custom_data_file": &hcldec.AttrSpec{Name: "custom_data_file", Type: cty.String, Required: false}, - "plan_info": &hcldec.BlockSpec{TypeName: "plan_info", Nested: hcldec.ObjectSpec((*FlatPlanInformation)(nil).HCL2Spec())}, - "polling_duration_timeout": &hcldec.AttrSpec{Name: "polling_duration_timeout", Type: cty.String, Required: false}, 
- "os_type": &hcldec.AttrSpec{Name: "os_type", Type: cty.String, Required: false}, - "os_disk_size_gb": &hcldec.AttrSpec{Name: "os_disk_size_gb", Type: cty.Number, Required: false}, - "disk_additional_size": &hcldec.AttrSpec{Name: "disk_additional_size", Type: cty.List(cty.Number), Required: false}, - "disk_caching_type": &hcldec.AttrSpec{Name: "disk_caching_type", Type: cty.String, Required: false}, - "allowed_inbound_ip_addresses": &hcldec.AttrSpec{Name: "allowed_inbound_ip_addresses", Type: cty.List(cty.String), Required: false}, - "user_name": &hcldec.AttrSpec{Name: "user_name", Type: cty.String, Required: false}, - "password": &hcldec.AttrSpec{Name: "password", Type: cty.String, Required: false}, - "communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false}, - "pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false}, - "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false}, - "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false}, - "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, - "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false}, - "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false}, - "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false}, - "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false}, - "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, - "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false}, - "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false}, - "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false}, - "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false}, - "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false}, - "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false}, - "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false}, - "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false}, - "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false}, - "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false}, - "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false}, - "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false}, - "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false}, - "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false}, - "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false}, - "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false}, - "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false}, - "ssh_read_write_timeout": 
&hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false}, - "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false}, - "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false}, - "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false}, - "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false}, - "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false}, - "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false}, - "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false}, - "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false}, - "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false}, - "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false}, - "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false}, - "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false}, - "async_resourcegroup_delete": &hcldec.AttrSpec{Name: "async_resourcegroup_delete", Type: cty.Bool, Required: false}, + "location": &hcldec.AttrSpec{Name: "location", Type: cty.String, Required: false}, + "vm_size": &hcldec.AttrSpec{Name: "vm_size", Type: cty.String, Required: false}, + "managed_image_resource_group_name": &hcldec.AttrSpec{Name: "managed_image_resource_group_name", Type: cty.String, Required: false}, + "managed_image_name": &hcldec.AttrSpec{Name: "managed_image_name", Type: cty.String, Required: false}, + "managed_image_storage_account_type": &hcldec.AttrSpec{Name: "managed_image_storage_account_type", Type: cty.String, Required: false}, + "managed_image_os_disk_snapshot_name": &hcldec.AttrSpec{Name: "managed_image_os_disk_snapshot_name", Type: cty.String, Required: false}, + "managed_image_data_disk_snapshot_prefix": &hcldec.AttrSpec{Name: "managed_image_data_disk_snapshot_prefix", Type: cty.String, Required: false}, + "managed_image_zone_resilient": &hcldec.AttrSpec{Name: "managed_image_zone_resilient", Type: cty.Bool, Required: false}, + "azure_tags": &hcldec.BlockAttrsSpec{TypeName: "azure_tags", ElementType: cty.String, Required: false}, + "resource_group_name": &hcldec.AttrSpec{Name: "resource_group_name", Type: cty.String, Required: false}, + "storage_account": &hcldec.AttrSpec{Name: "storage_account", Type: cty.String, Required: false}, + "temp_compute_name": &hcldec.AttrSpec{Name: "temp_compute_name", Type: cty.String, Required: false}, + "temp_resource_group_name": &hcldec.AttrSpec{Name: "temp_resource_group_name", Type: cty.String, Required: false}, + "build_resource_group_name": &hcldec.AttrSpec{Name: "build_resource_group_name", Type: cty.String, Required: false}, + "build_key_vault_name": &hcldec.AttrSpec{Name: "build_key_vault_name", Type: cty.String, Required: false}, + "private_virtual_network_with_public_ip": &hcldec.AttrSpec{Name: "private_virtual_network_with_public_ip", Type: cty.Bool, Required: false}, + "virtual_network_name": &hcldec.AttrSpec{Name: "virtual_network_name", Type: cty.String, Required: false}, + "virtual_network_subnet_name": &hcldec.AttrSpec{Name: "virtual_network_subnet_name", Type: cty.String, Required: false}, + "virtual_network_resource_group_name": &hcldec.AttrSpec{Name: 
"virtual_network_resource_group_name", Type: cty.String, Required: false}, + "custom_data_file": &hcldec.AttrSpec{Name: "custom_data_file", Type: cty.String, Required: false}, + "plan_info": &hcldec.BlockSpec{TypeName: "plan_info", Nested: hcldec.ObjectSpec((*FlatPlanInformation)(nil).HCL2Spec())}, + "polling_duration_timeout": &hcldec.AttrSpec{Name: "polling_duration_timeout", Type: cty.String, Required: false}, + "os_type": &hcldec.AttrSpec{Name: "os_type", Type: cty.String, Required: false}, + "os_disk_size_gb": &hcldec.AttrSpec{Name: "os_disk_size_gb", Type: cty.Number, Required: false}, + "disk_additional_size": &hcldec.AttrSpec{Name: "disk_additional_size", Type: cty.List(cty.Number), Required: false}, + "disk_caching_type": &hcldec.AttrSpec{Name: "disk_caching_type", Type: cty.String, Required: false}, + "allowed_inbound_ip_addresses": &hcldec.AttrSpec{Name: "allowed_inbound_ip_addresses", Type: cty.List(cty.String), Required: false}, + "user_name": &hcldec.AttrSpec{Name: "user_name", Type: cty.String, Required: false}, + "password": &hcldec.AttrSpec{Name: "password", Type: cty.String, Required: false}, + "communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false}, + "pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false}, + "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false}, + "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false}, + "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, + "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false}, + "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false}, + "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false}, + "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false}, + "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, + "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false}, + "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false}, + "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false}, + "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false}, + "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false}, + "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false}, + "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false}, + "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false}, + "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false}, + "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false}, + "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false}, + "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false}, + "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false}, + "ssh_proxy_port": &hcldec.AttrSpec{Name: 
"ssh_proxy_port", Type: cty.Number, Required: false}, + "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false}, + "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false}, + "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false}, + "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false}, + "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false}, + "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false}, + "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false}, + "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false}, + "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false}, + "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false}, + "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false}, + "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false}, + "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false}, + "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false}, + "async_resourcegroup_delete": &hcldec.AttrSpec{Name: "async_resourcegroup_delete", Type: cty.Bool, Required: false}, } return s } diff --git a/website/source/docs/builders/azure-arm.html.md.erb b/website/source/docs/builders/azure-arm.html.md.erb index 1c9bd3777..16e0e4c93 100644 --- a/website/source/docs/builders/azure-arm.html.md.erb +++ b/website/source/docs/builders/azure-arm.html.md.erb @@ -22,21 +22,23 @@ ARM](/docs/builders/azure.html#authentication-for-azure). The documentation below references command output from the [Azure CLI](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/). -## Authentication: -There are three options for Authenticationg to Azure, two of which require certain -configuration options in order to properly build an Azure Arm Image. +## Configuration Reference -### Managed Identity +There are many configuration options available for the builder. We'll start +with authentication parameters, then go over the Azure ARM builder specific +options. In addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this builder. -If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure.html#azure-managed-identity) -you don't need to specify any additional configuration options. As Packer will attempt to use the Managed Identity -and subscription of the VM that Packer is running on. +### Authentication options +<%= partial "partials/builder/azure/common/client/_Config" %> -### Interactive User Authentication +#### Managed Identity +If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure.html#azure-managed-identity) you don't need to specify any additional configuration options. As Packer will attempt to use the Managed Identity and subscription of the VM that Packer is running on. 
+ +#### Interactive User Authentication To use interactive user authentication, you should specify `subscription_id` only. Packer will use cached credentials or redirect you to a website to log in. -### Service Principal +#### Service Principal To use a [service principal](/docs/builders/azure.html#azure-active-directory-service-principal) you should specify `subscription_id`, `client_id` and one of `client_secret`, `client_cert_path` or `client_jwt`. @@ -60,34 +62,19 @@ you should specify `subscription_id`, `client_id` and one of `client_secret`, Directory docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-certificate-credentials) for more information. -<%= partial "partials/builder/azure/common/client/_Config" %> -## Configuration Reference - -The following configuration options are available for building Azure images. In -addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this -builder. +### Azure ARM builder specific options The Azure builder can create either a VHD, or a managed image. If you are creating a VHD, you **must** start with a VHD. Likewise, if you want to create -a managed image you **must** start with a managed image. Images can be obtained from -the Azure Marketplace or from within a users' subscription - see [Using Custom Images](#using-custom-images) +a managed image you **must** start with a managed image. -### Using Azure Marketplace Images +### Required: <%= partial "partials/builder/azure/arm/Config-required" %> -### Using Custom Images - -- `image_url` (string) - Specify a custom VHD to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. - -- `custom_managed_image_resource_group_name` (string) - Specify the source managed image's resource group used to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. If this value is set, the value custom_managed_image_name must also be set. See documentation to learn more about managed images. - -- `custom_managed_image_name` (string) - Specify the source managed image's name to use. If this value is set, do not set image_publisher, image_offer, image_sku, or image_version. If this value is set, the value custom_managed_image_resource_group_name must also be set. See documentation to learn more about managed images. - - -When creating a VHD the following options are required: +When creating a VHD the following additional options are required: - `capture_container_name` (string) - Destination container name. Essentially the "directory" where your VHD will be organized in Azure. The captured @@ -103,7 +90,7 @@ When creating a VHD the following options are required: - `storage_account` (string) - Storage account under which the final artifact will be stored. -When creating a Managed Imaged the following options are required: +When creating a managed image the following additional options are required: - `managed_image_name` (string) - Specify the managed image name where the result of the Packer build will be saved. The image name must not exist @@ -119,6 +106,7 @@ When creating a Managed Imaged the following options are required: [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) to learn more about managed images. 
+#### Shared Image Gallery Managed images can optionally be published to [Shared Image Gallery](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/) as Shared Gallery Image version. Shared Image Gallery **only** works with Managed Images. **A VHD cannot be published to @@ -140,12 +128,6 @@ Following is an example. "managed_image_name": "TargetImageName", "managed_image_resource_group_name": "TargetResourceGroup" - -### Optional: - -<%= partial "partials/builder/azure/arm/Config-not-required" %> -<%= partial "partials/builder/azure/common/client/_Config-not-required" %> - #### Resource Group Usage The Azure builder can either provision resources into a new resource group that @@ -177,6 +159,12 @@ To use an existing resource group you **must** provide: Providing `temp_resource_group_name` or `location` in combination with `build_resource_group_name` is not allowed. +### Optional: + +<%= partial "partials/builder/azure/arm/Config-not-required" %> +<%= partial "partials/builder/azure/common/client/_Config-not-required" %> + + ## Basic Example Here is a basic example for Azure. diff --git a/website/source/partials/builder/azure/arm/_Config-not-required.html.md b/website/source/partials/builder/azure/arm/_Config-not-required.html.md index 7ed6fe482..6f040efe1 100644 --- a/website/source/partials/builder/azure/arm/_Config-not-required.html.md +++ b/website/source/partials/builder/azure/arm/_Config-not-required.html.md @@ -59,23 +59,6 @@ CLI example `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all` -- `image_url` (string) - Specify a custom VHD to use. If this value is set, do - not set image_publisher, image_offer, image_sku, or image_version. - -- `custom_managed_image_resource_group_name` (string) - Specify the source managed image's resource group used to use. If this - value is set, do not set image\_publisher, image\_offer, image\_sku, or - image\_version. If this value is set, the value - `custom_managed_image_name` must also be set. See - [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) - to learn more about managed images. - -- `custom_managed_image_name` (string) - Specify the source managed image's name to use. If this value is set, do - not set image\_publisher, image\_offer, image\_sku, or image\_version. - If this value is set, the value - `custom_managed_image_resource_group_name` must also be set. See - [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) - to learn more about managed images. - - `location` (string) - Location - `vm_size` (string) - Size of the VM used for building. This can be changed when you deploy a VM from your VHD. See diff --git a/website/source/partials/builder/azure/arm/_Config-required.html.md b/website/source/partials/builder/azure/arm/_Config-required.html.md index 696875dac..fba192a62 100644 --- a/website/source/partials/builder/azure/arm/_Config-required.html.md +++ b/website/source/partials/builder/azure/arm/_Config-required.html.md @@ -1,22 +1,38 @@ -- `image_publisher` (string) - PublisherName for your base image. See +- `image_publisher` (string) - Name of the publisher to use for your base image (Azure Marketplace Images only). See [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) for details. 
CLI example `az vm image list-publishers --location westus` -- `image_offer` (string) - Offer for your base image. See +- `image_offer` (string) - Name of the publisher's offer to use for your base image (Azure Marketplace Images only). See [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) for details. CLI example `az vm image list-offers --location westus --publisher Canonical` -- `image_sku` (string) - SKU for your base image. See +- `image_sku` (string) - SKU of the image offer to use for your base image (Azure Marketplace Images only). See [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/) for details. CLI example `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer` + +- `image_url` (string) - URL to a custom VHD to use for your base image. If this value is set, do + not set image_publisher, image_offer, image_sku, or image_version. + +- `custom_managed_image_name` (string) - Name of a custom managed image to use for your base image. If this value is set, do + not set image_publisher, image_offer, image_sku, or image_version. + If this value is set, the value + `custom_managed_image_resource_group_name` must also be set. See + [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) + to learn more about managed images. + +- `custom_managed_image_resource_group_name` (string) - Name of a custom managed image's resource group to use for your base image. If this + value is set, image_publisher, image_offer, image_sku, or image_version. + `custom_managed_image_name` must also be set. See + [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images) + to learn more about managed images. \ No newline at end of file From 08f394604cfbd17911bfffddef29a21de3b9f56f Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Tue, 11 Feb 2020 12:07:06 +0100 Subject: [PATCH 28/61] Update iso_config.go use defaultGetterClient instead of getter.DefaultClient --- common/iso_config.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/common/iso_config.go b/common/iso_config.go index 338bafaf5..c092ccdd1 100644 --- a/common/iso_config.go +++ b/common/iso_config.go @@ -9,7 +9,6 @@ import ( "fmt" "strings" - getter "github.com/hashicorp/go-getter/v2" "github.com/hashicorp/packer/template/interpolate" ) @@ -163,7 +162,7 @@ func (c *ISOConfig) Prepare(*interpolate.Context) (warnings []string, errs []err if c.ISOChecksumURL != "" { url = c.ISOChecksumURL } - cksum, err := getter.DefaultClient.ChecksumFromFile(context.TODO(), url, c.ISOUrls[0]) + cksum, err := defaultGetterClient.ChecksumFromFile(context.TODO(), url, c.ISOUrls[0]) if err != nil { errs = append(errs, fmt.Errorf("Couldn't extract checksum from checksum file: %v", err)) } else { From 884959bc34fd4916c71cec1749d8b5127a7e4720 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 11 Feb 2020 12:09:30 -0800 Subject: [PATCH 29/61] update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d57b7916..1c43795ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Packer team at HashiCorp moving forward. [GH-8480] ### IMPROVEMENTS: * builder/alicloud: Add AlicloudProfile option. 
[GH-8560] +* builder/amazon: Add max_retries option to aws builders [GH-8709] * builder/amazon: Add source AMI owner ID/name to template engines [GH-8550] * builder/amazon: Update instance waiters to use global waiter settings set by `AWS_POLL_DELAY_SECONDS` and `AWS_TIMEOUT_SECONDS` [GH-8699] @@ -15,6 +16,8 @@ Packer team at HashiCorp moving forward. [GH-8480] previously manually uploaded one. [GH-8624] * builder/vagrant: Fix a crash in the Vagrant driver [GH-8607] * builder/yandex: Add service account ID to config [GH-8717] +* communicator/winrm: Users can now override winrm_host with a static IP even + when using cloud builders. [GH-8675] * core: Add `PACKER_PLUGIN_PATH` to list of supported paths for plugin discovery [GH-8616] * core: clean up messy log line in plugin execution. [GH-8542] @@ -23,6 +26,8 @@ Packer team at HashiCorp moving forward. [GH-8480] * core: Fix loading external plugins defined in PACKER_CONFIG [GH-8582] * core: Log name of postprocessor running to disambiguate long chains of post- processors. [GH-8613] +* core: Packer can use isos in-place on Windows again, instead of copying them + into its cache. [GH-7627] * core: step_download: return without error if Urls is empty [GH-8579] * post-processor/vsphere-template] Simplify method to use vm.MarkAsTemplate (optionally) [GH-8511] From f764edbc858559769f9c17693bd240624a5d8af4 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Tue, 11 Feb 2020 15:28:21 -0500 Subject: [PATCH 30/61] Update CHANGELOG * Remove incorrect entry for 8675 --- CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c43795ab..4b4c00837 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,8 +48,6 @@ Packer team at HashiCorp moving forward. [GH-8480] * builder/vagrant: Fix bug with reading key from a path with spaces [GH-8605] * builder/virtualbox-ovf: Remove config dependency from StepImport [GH-8509] * builder/virtualbox-vm: use config as a non pointer to avoid a panic [GH-8576] -* communicator/winrm: Fix issue where the value of `winrm_host` was being - ignored for some builders [GH-8615] * core: Fix crash when build.sources is set to an invalid name [GH-8569] * core: Fix error loading .packerconfig [GH-8623] * core: Fix loading local ISO files when using `iso_target_path` [GH-8689] From a6d90babbf747f09fa8a42259607794dcf25121d Mon Sep 17 00:00:00 2001 From: Moss Date: Wed, 12 Feb 2020 11:56:18 +0100 Subject: [PATCH 31/61] Add check for json duplicate fields --- template/parse.go | 86 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 9 deletions(-) diff --git a/template/parse.go b/template/parse.go index 33de19c22..2c2cf8b24 100644 --- a/template/parse.go +++ b/template/parse.go @@ -327,17 +327,12 @@ func (r *rawTemplate) parsePostProcessor( // Parse takes the given io.Reader and parses a Template object out of it. func Parse(r io.Reader) (*Template, error) { - // Create a buffer to copy what we read - var buf bytes.Buffer - if _, err := buf.ReadFrom(r); err != nil { - return nil, err - } - - // First, decode the object into an interface{}. We do this instead of - // the rawTemplate directly because we'd rather use mapstructure to + // First, decode the object into an interface{} and search for duplicate fields. + // We do this instead of the rawTemplate directly because we'd rather use mapstructure to // decode since it has richer errors. 
var raw interface{} - if err := json.Unmarshal(buf.Bytes(), &raw); err != nil { + buf, err := jsonUnmarshal(r, &raw) + if err != nil { return nil, err } @@ -394,6 +389,79 @@ func Parse(r io.Reader) (*Template, error) { return rawTpl.Template() } +func jsonUnmarshal(r io.Reader, raw *interface{}) (bytes.Buffer, error) { + // Create a buffer to copy what we read + var buf bytes.Buffer + if _, err := buf.ReadFrom(r); err != nil { + return buf, err + } + + // Decode the object into an interface{} + if err := json.Unmarshal(buf.Bytes(), raw); err != nil { + return buf, err + } + + // If Json is valid, check for duplicate fields to avoid silent unwanted override + jsonDecoder := json.NewDecoder(strings.NewReader(buf.String())) + if err := checkForDuplicateFields(jsonDecoder); err != nil { + return buf, err + } + + return buf, nil +} + +func checkForDuplicateFields(d *json.Decoder) error { + // Get next token from JSON + t, err := d.Token() + if err != nil { + return err + } + + delim, ok := t.(json.Delim) + // Do nothing if it's not a delimiter + if !ok { + return nil + } + + // Check for duplicates inside of a delimiter {} or [] + switch delim { + case '{': + keys := make(map[string]bool) + for d.More() { + // Get attribute key + t, err := d.Token() + if err != nil { + return err + } + key := t.(string) + + // Check for duplicates + if keys[key] { + return fmt.Errorf("template has duplicate field: %s", key) + } + keys[key] = true + + // Check value to find duplicates in nested blocks + if err := checkForDuplicateFields(d); err != nil { + return err + } + } + case '[': + for d.More() { + if err := checkForDuplicateFields(d); err != nil { + return err + } + } + } + + // consume closing delimiter } or ] + if _, err := d.Token(); err != nil { + return err + } + + return nil +} + // ParseFile is the same as Parse but is a helper to automatically open // a file for parsing. 
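The duplicate-field check added above is self-contained enough to exercise on its own: rather than inspecting the decoded map (where `json.Unmarshal` silently keeps only the last value for a repeated key), it walks the raw token stream and recurses into nested objects and arrays. A minimal standalone sketch of the same token-walking idea — the `checkDupes` name and the sample document are invented for the example — looks like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// checkDupes walks the token stream of one JSON value and reports the
// first duplicated object key it finds, recursing into nested objects
// and arrays the same way checkForDuplicateFields does.
func checkDupes(d *json.Decoder) error {
	t, err := d.Token()
	if err != nil {
		return err
	}
	delim, ok := t.(json.Delim)
	if !ok {
		return nil // scalar value: nothing to check
	}
	switch delim {
	case '{':
		seen := map[string]bool{}
		for d.More() {
			keyTok, err := d.Token()
			if err != nil {
				return err
			}
			key := keyTok.(string)
			if seen[key] {
				return fmt.Errorf("template has duplicate field: %s", key)
			}
			seen[key] = true
			// The value itself may be an object or array; recurse.
			if err := checkDupes(d); err != nil {
				return err
			}
		}
	case '[':
		for d.More() {
			if err := checkDupes(d); err != nil {
				return err
			}
		}
	}
	// Consume the closing } or ].
	_, err = d.Token()
	return err
}

func main() {
	src := `{"variables": {"a": "1"}, "variables": {"a": "2"}}`
	err := checkDupes(json.NewDecoder(strings.NewReader(src)))
	fmt.Println(err) // template has duplicate field: variables
}
```

Running it reports the repeated `variables` key, which is exactly the kind of silent unwanted override the patch guards against.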
func ParseFile(path string) (*Template, error) { From d654898ebf141cc66a45cc59fa071c94cbc502b0 Mon Sep 17 00:00:00 2001 From: Moss Date: Wed, 12 Feb 2020 14:34:20 +0100 Subject: [PATCH 32/61] Add tests for check of json duplicate fields --- template/parse_test.go | 19 +++++++++++++++++++ .../test-fixtures/error-duplicate-config.json | 11 +++++++++++ .../error-duplicate-variables.json | 13 +++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 template/test-fixtures/error-duplicate-config.json create mode 100644 template/test-fixtures/error-duplicate-variables.json diff --git a/template/parse_test.go b/template/parse_test.go index 77b63616d..03e9c23b3 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -560,3 +560,22 @@ func TestParse_bad(t *testing.T) { } } } + +func TestParse_checkForDuplicateFields(t *testing.T) { + cases := []struct { + File string + Expected string + }{ + {"error-duplicate-variables.json", "template has duplicate field: variables"}, + {"error-duplicate-config.json", "template has duplicate field: foo"}, + } + for _, tc := range cases { + _, err := ParseFile(fixtureDir(tc.File)) + if err == nil { + t.Fatalf("expected error") + } + if !strings.Contains(err.Error(), tc.Expected) { + t.Fatalf("file: %s\nExpected: %s\n%s\n", tc.File, tc.Expected, err.Error()) + } + } +} \ No newline at end of file diff --git a/template/test-fixtures/error-duplicate-config.json b/template/test-fixtures/error-duplicate-config.json new file mode 100644 index 000000000..19d2beeec --- /dev/null +++ b/template/test-fixtures/error-duplicate-config.json @@ -0,0 +1,11 @@ +{ + "variables": { + "var": "value" + }, + "builders": [ + { + "foo": "something", + "foo": "something" + } + ] +} diff --git a/template/test-fixtures/error-duplicate-variables.json b/template/test-fixtures/error-duplicate-variables.json new file mode 100644 index 000000000..c603b7fe0 --- /dev/null +++ b/template/test-fixtures/error-duplicate-variables.json @@ -0,0 +1,13 @@ +{ + "variables": { + "var": "value" + }, + "variables": { + "var": "value" + }, + "builders": [ + { + "foo": "something" + } + ] +} From dc81720dc9cfb486c10e211ef79901a5bb6df781 Mon Sep 17 00:00:00 2001 From: Moss Date: Wed, 12 Feb 2020 14:44:28 +0100 Subject: [PATCH 33/61] Fix format --- template/parse_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/template/parse_test.go b/template/parse_test.go index 03e9c23b3..4c2afbbeb 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -578,4 +578,4 @@ func TestParse_checkForDuplicateFields(t *testing.T) { t.Fatalf("file: %s\nExpected: %s\n%s\n", tc.File, tc.Expected, err.Error()) } } -} \ No newline at end of file +} From dcfdcb01434dd7c5cf658ed26742c23f6988d47a Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Wed, 12 Feb 2020 15:55:06 +0100 Subject: [PATCH 34/61] Update CHANGELOG.md add line about variables PR --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b4c00837..55affec99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,9 @@ has been merged with the Packer core. It will be officially supported by the Packer team at HashiCorp moving forward. [GH-8480] +** HCL2 variables & functions ** HCL2 configurations can now use `variable`, +`variables`, `locals`, and functions [GH-8588]. + ### IMPROVEMENTS: * builder/alicloud: Add AlicloudProfile option. 
[GH-8560] * builder/amazon: Add max_retries option to aws builders [GH-8709] From efdbb4f5d38e7e9975f650d783294634d0f6c77b Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Wed, 12 Feb 2020 16:05:22 +0100 Subject: [PATCH 35/61] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55affec99..1974afc18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ Packer team at HashiCorp moving forward. [GH-8480] * core: Packer can use isos in-place on Windows again, instead of copying them into its cache. [GH-7627] * core: step_download: return without error if Urls is empty [GH-8579] +* core/hcl2: Fix bug preventing reading slices within other slices [GH-8669] * post-processor/vsphere-template] Simplify method to use vm.MarkAsTemplate (optionally) [GH-8511] * scripts: Fix some issues with mapstructure-to-hcl2 code generator. [GH-8574] From 20eb1ea59233ea8724d8a0468958fe51cf43fe8e Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Wed, 12 Feb 2020 16:23:06 +0100 Subject: [PATCH 36/61] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1974afc18..4862313de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ has been merged with the Packer core. It will be officially supported by the Packer team at HashiCorp moving forward. [GH-8480] -** HCL2 variables & functions ** HCL2 configurations can now use `variable`, +**HCL2 variables & functions** HCL2 configurations can now use `variable`, `variables`, `locals`, and functions [GH-8588]. ### IMPROVEMENTS: From 78012dc56f33d7e28d1b5e1e13acf87ab990b8c9 Mon Sep 17 00:00:00 2001 From: "Dax T. Games" Date: Wed, 12 Feb 2020 11:31:46 -0500 Subject: [PATCH 37/61] add autogenerated help for vsphere iso remove cdrom --- builder/vsphere/iso/step_remove_cdrom.go | 4 +++ .../vsphere/iso/step_remove_cdrom.hcl2spec.go | 30 +++++++++++++++++++ .../iso/_CDRomConfig-not-required.html.md | 4 +-- .../_RemoveCDRomConfig-not-required.html.md | 4 +++ 4 files changed, 39 insertions(+), 3 deletions(-) create mode 100644 builder/vsphere/iso/step_remove_cdrom.hcl2spec.go create mode 100644 website/source/partials/builder/vsphere/iso/_RemoveCDRomConfig-not-required.html.md diff --git a/builder/vsphere/iso/step_remove_cdrom.go b/builder/vsphere/iso/step_remove_cdrom.go index 9fd3075a4..508676575 100644 --- a/builder/vsphere/iso/step_remove_cdrom.go +++ b/builder/vsphere/iso/step_remove_cdrom.go @@ -1,3 +1,6 @@ +//go:generate struct-markdown +//go:generate mapstructure-to-hcl2 -type RemoveCDRomConfig + package iso import ( @@ -8,6 +11,7 @@ import ( ) type RemoveCDRomConfig struct { + // Remove CD-ROM devices from template. Defaults to `false`. RemoveCdrom bool `mapstructure:"remove_cdrom"` } diff --git a/builder/vsphere/iso/step_remove_cdrom.hcl2spec.go b/builder/vsphere/iso/step_remove_cdrom.hcl2spec.go new file mode 100644 index 000000000..a8c2303cf --- /dev/null +++ b/builder/vsphere/iso/step_remove_cdrom.hcl2spec.go @@ -0,0 +1,30 @@ +// Code generated by "mapstructure-to-hcl2 -type RemoveCDRomConfig"; DO NOT EDIT. +package iso + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// FlatRemoveCDRomConfig is an auto-generated flat version of RemoveCDRomConfig. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
+type FlatRemoveCDRomConfig struct { + RemoveCdrom *bool `mapstructure:"remove_cdrom" cty:"remove_cdrom"` +} + +// FlatMapstructure returns a new FlatRemoveCDRomConfig. +// FlatRemoveCDRomConfig is an auto-generated flat version of RemoveCDRomConfig. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*RemoveCDRomConfig) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatRemoveCDRomConfig) +} + +// HCL2Spec returns the hcl spec of a RemoveCDRomConfig. +// This spec is used by HCL to read the fields of RemoveCDRomConfig. +// The decoded values from this spec will then be applied to a FlatRemoveCDRomConfig. +func (*FlatRemoveCDRomConfig) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "remove_cdrom": &hcldec.AttrSpec{Name: "remove_cdrom", Type: cty.Bool, Required: false}, + } + return s +} diff --git a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md index ed50e4461..e3952b792 100644 --- a/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md +++ b/website/source/partials/builder/vsphere/iso/_CDRomConfig-not-required.html.md @@ -2,8 +2,6 @@ - `cdrom_type` (string) - Which controller to use. Example: `sata`. Defaults to `ide`. -- `remove_cdrom` (boolean) - Remove CD/DVD-ROM devices from template. Defaults to `false`. - - `iso_paths` ([]string) - List of datastore paths to ISO files that will be mounted to the VM. Example: `"[datastore1] ISO/ubuntu.iso"`. - + \ No newline at end of file diff --git a/website/source/partials/builder/vsphere/iso/_RemoveCDRomConfig-not-required.html.md b/website/source/partials/builder/vsphere/iso/_RemoveCDRomConfig-not-required.html.md new file mode 100644 index 000000000..a01290e7e --- /dev/null +++ b/website/source/partials/builder/vsphere/iso/_RemoveCDRomConfig-not-required.html.md @@ -0,0 +1,4 @@ + + +- `remove_cdrom` (bool) - Remove CD-ROM devices from template. Defaults to `false`. + \ No newline at end of file From 746b7cbf0f257e9fea3896aae9510769da4f7b74 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 12 Feb 2020 13:35:38 -0500 Subject: [PATCH 38/61] cut release 1.5.2 --- CHANGELOG.md | 4 ++-- version/version.go | 2 +- website/config.rb | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4862313de..4dfbed6f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,9 @@ -## 1.5.2 (Upcoming) +## 1.5.2 (February 12, 2020) **New Builder** The vsphere-iso builder, previously maintained by JetBrains, has been merged with the Packer core. It will be officially supported by the Packer team at HashiCorp moving forward. [GH-8480] -**HCL2 variables & functions** HCL2 configurations can now use `variable`, +**HCL2 variables & functions** HCL2 configurations can now use `variable`, `variables`, `locals`, and functions [GH-8588]. ### IMPROVEMENTS: diff --git a/version/version.go b/version/version.go index f83d80332..409e93f56 100644 --- a/version/version.go +++ b/version/version.go @@ -14,7 +14,7 @@ const Version = "1.5.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "dev" +const VersionPrerelease = "" func FormattedVersion() string { var versionString bytes.Buffer diff --git a/website/config.rb b/website/config.rb index de2b97c4e..6e5ad4ef8 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| h.name = "packer" - h.version = "1.5.1" + h.version = "1.5.2" h.github_slug = "hashicorp/packer" h.website_root = "website" end From c121aa9104008dcaccf3cc8b603718a6df7a0675 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 12 Feb 2020 16:54:04 -0500 Subject: [PATCH 39/61] scripts/codesign_example: Add check for sha256sum program --- scripts/codesign_example.sh | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/scripts/codesign_example.sh b/scripts/codesign_example.sh index 05290589c..14d7ed6ef 100755 --- a/scripts/codesign_example.sh +++ b/scripts/codesign_example.sh @@ -8,6 +8,17 @@ if ! command -v jq > /dev/null 2>&1; then exit 1 fi +if ! command -v sha256sum > /dev/null 2>&1; then + if !command -v gsha256sum > /dev/null 2>&1; then + echo "This script requires sha256sum (linux) or gsha256sum (osx) to work properly." + exit 1 + else + SHASUM_PROG=gsha256sum + fi +else + SHASUM_PROG=sha256sum +fi + PRODUCT_NAME="${PRODUCT_NAME:-""}" if [ -z "$PRODUCT_NAME" ]; then echo "Missing required product name: ${PRODUCT_NAME}" @@ -145,6 +156,6 @@ signed_checksum=$( | grep -i "x-checksum-sha256" | awk 'gsub("[\r\n]", "", $2) {print $2;}' ) -echo "${signed_checksum} signed_${SN_ID}.zip" | gsha256sum -c +echo "${signed_checksum} signed_${SN_ID}.zip" | SHASUM_PROG -c -mv "signed_${SN_ID}.zip" "$TARGET_ZIP" \ No newline at end of file +mv "signed_${SN_ID}.zip" "$TARGET_ZIP" From c2a27efc52825986523f7708d5a3cda9e018375f Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 12 Feb 2020 16:55:34 -0500 Subject: [PATCH 40/61] Update CHANGELOG.md --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dfbed6f5..bfd23a731 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,9 @@ Packer team at HashiCorp moving forward. [GH-8480] * builder/yandex: Add service account ID to config [GH-8717] * communicator/winrm: Users can now override winrm_host with a static IP even when using cloud builders. [GH-8675] +* core/hcl2: Fix bug preventing reading slices within other slices [GH-8669] +* core: Interpolation within post-processors can now access build-specific + values like Host IP, communicator password, and more [GH-8632] * core: Add `PACKER_PLUGIN_PATH` to list of supported paths for plugin discovery [GH-8616] * core: clean up messy log line in plugin execution. [GH-8542] @@ -32,7 +35,6 @@ Packer team at HashiCorp moving forward. [GH-8480] * core: Packer can use isos in-place on Windows again, instead of copying them into its cache. [GH-7627] * core: step_download: return without error if Urls is empty [GH-8579] -* core/hcl2: Fix bug preventing reading slices within other slices [GH-8669] * post-processor/vsphere-template] Simplify method to use vm.MarkAsTemplate (optionally) [GH-8511] * scripts: Fix some issues with mapstructure-to-hcl2 code generator. 
[GH-8574] From 768e0921b83b272b2f0c93f69780266bb1812dd4 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 12 Feb 2020 17:20:05 -0500 Subject: [PATCH 41/61] Cut version 1.5.2 --- go.mod | 1 + go.sum | 3 +++ 2 files changed, 4 insertions(+) diff --git a/go.mod b/go.mod index 72ba04b88..9907a2515 100644 --- a/go.mod +++ b/go.mod @@ -112,6 +112,7 @@ require ( github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed + github.com/mitchellh/gox v1.0.1 // indirect github.com/mitchellh/iochan v1.0.0 github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 diff --git a/go.sum b/go.sum index 7f526f33c..b41b72d2d 100644 --- a/go.sum +++ b/go.sum @@ -263,6 +263,7 @@ github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= @@ -370,6 +371,8 @@ github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaC github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= From 675fe95882877690782c67f290440783a1b83519 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 12 Feb 2020 15:22:57 -0800 Subject: [PATCH 42/61] move to 1.5.3-dev --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index 409e93f56..9d7cc7cac 100644 --- a/version/version.go +++ b/version/version.go @@ -9,12 +9,12 @@ import ( var GitCommit string // The main version number that is being run at the moment. -const Version = "1.5.2" +const Version = "1.5.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "" +const VersionPrerelease = "dev" func FormattedVersion() string { var versionString bytes.Buffer From c2ad9481f633f84d665258f80c18c923863e98bc Mon Sep 17 00:00:00 2001 From: Aayush Sarva Date: Thu, 13 Feb 2020 20:09:45 +0530 Subject: [PATCH 43/61] Fix typo in Dockerfiles section (#8728) `an container runner` => `a container runner` --- website/source/docs/builders/docker.html.md.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/docker.html.md.erb b/website/source/docs/builders/docker.html.md.erb index 7d3f030b6..e85b1d955 100644 --- a/website/source/docs/builders/docker.html.md.erb +++ b/website/source/docs/builders/docker.html.md.erb @@ -321,7 +321,7 @@ Instead, you can just provide shell scripts, Chef recipes, Puppet manifests, etc. to provision your Docker container just like you would a regular virtualized or dedicated machine. -While Docker has many features, Packer views Docker simply as an container +While Docker has many features, Packer views Docker simply as a container runner. To that end, Packer is able to repeatedly build these containers using portable provisioning scripts. From 23fa3107a3f1cd8a3b330c32b8030fad6f9ddb57 Mon Sep 17 00:00:00 2001 From: Sylvia Moss Date: Thu, 13 Feb 2020 17:35:23 +0100 Subject: [PATCH 44/61] Render variables one time on prepare method (#8727) --- builder/null/artifact_export.go | 5 +++ builder/null/builder.go | 1 + command/build_test.go | 32 ++++++++++++++++++- .../build-variable-sharing/template.json | 25 +++++++++++++++ packer/provisioner.go | 7 ++++ template/interpolate/i.go | 7 +++- template/interpolate/render.go | 2 +- 7 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 command/test-fixtures/build-variable-sharing/template.json diff --git a/builder/null/artifact_export.go b/builder/null/artifact_export.go index 3962358e0..e3d23236d 100644 --- a/builder/null/artifact_export.go +++ b/builder/null/artifact_export.go @@ -25,6 +25,11 @@ func (a *NullArtifact) String() string { } func (a *NullArtifact) State(name string) interface{} { + if name == "generated_data" { + return map[interface{}]interface{}{ + "ID": "Null", + } + } return nil } diff --git a/builder/null/builder.go b/builder/null/builder.go index e0568ce05..1d2fc40e4 100644 --- a/builder/null/builder.go +++ b/builder/null/builder.go @@ -47,6 +47,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack state := new(multistep.BasicStateBag) state.Put("hook", hook) state.Put("ui", ui) + state.Put("instance_id", "Null") // Run! b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) diff --git a/command/build_test.go b/command/build_test.go index ca8252715..76e49d65a 100644 --- a/command/build_test.go +++ b/command/build_test.go @@ -111,6 +111,33 @@ func TestBuildOnlyFileMultipleFlags(t *testing.T) { } } +func TestBuildProvisionAndPosProcessWithBuildVariablesSharing(t *testing.T) { + c := &BuildCommand{ + Meta: testMetaFile(t), + } + + args := []string{ + filepath.Join(testFixture("build-variable-sharing"), "template.json"), + } + + files := []string{ + "provisioner.Null.txt", + "post-processor.Null.txt", + } + + defer cleanup(files...) 
+ + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + for _, f := range files { + if !fileExists(f) { + t.Errorf("Expected to find %s", f) + } + } +} + func TestBuildEverything(t *testing.T) { c := &BuildCommand{ Meta: testMetaFile(t), @@ -231,7 +258,7 @@ func testMetaFile(t *testing.T) Meta { } } -func cleanup() { +func cleanup(moreFiles ...string) { os.RemoveAll("chocolate.txt") os.RemoveAll("vanilla.txt") os.RemoveAll("cherry.txt") @@ -245,6 +272,9 @@ func cleanup() { os.RemoveAll("lilas.txt") os.RemoveAll("campanules.txt") os.RemoveAll("ducky.txt") + for _, file := range moreFiles { + os.RemoveAll(file) + } } func TestBuildCommand_ParseArgs(t *testing.T) { diff --git a/command/test-fixtures/build-variable-sharing/template.json b/command/test-fixtures/build-variable-sharing/template.json new file mode 100644 index 000000000..ac1c410b5 --- /dev/null +++ b/command/test-fixtures/build-variable-sharing/template.json @@ -0,0 +1,25 @@ +{ + "builders": [ + { + "name": "chocolate", + "type": "null", + "communicator": "none" + } + ], + "provisioners": [ + { + "type": "shell-local", + "inline": [ + "echo hi > provisioner.{{ build `ID`}}.txt" + ] + } + ], + "post-processors": [ + { + "type": "shell-local", + "inline": [ + "echo hi > post-processor.{{ build `ID`}}.txt" + ] + } + ] +} \ No newline at end of file diff --git a/packer/provisioner.go b/packer/provisioner.go index fce6355f3..7b7fdaa60 100644 --- a/packer/provisioner.go +++ b/packer/provisioner.go @@ -80,6 +80,13 @@ func BasicPlaceholderData() map[string]string { } func CastDataToMap(data interface{}) map[string]interface{} { + + if interMap, ok := data.(map[string]interface{}); ok { + // null and file builder sometimes don't use a communicator and + // therefore don't go through RPC + return interMap + } + // Provisioners expect a map[string]interface{} in their data field, but // it gets converted into a map[interface]interface on the way over the // RPC. Check that data can be cast into such a form, and cast it. diff --git a/template/interpolate/i.go b/template/interpolate/i.go index 9dcdc4ba8..54d1e516e 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -44,7 +44,12 @@ func NewContext() *Context { return &Context{} } -// Render is shorthand for constructing an I and calling Render. +// RenderOnce is shorthand for constructing an I and calling Render one time. +func RenderOnce(v string, ctx *Context) (string, error) { + return (&I{Value: v}).Render(ctx) +} + +// Render is shorthand for constructing an I and calling Render until all variables are rendered. func Render(v string, ctx *Context) (rendered string, err error) { // Keep interpolating until all variables are done // Sometimes a variable can been inside another one diff --git a/template/interpolate/render.go b/template/interpolate/render.go index 4c49d43b8..7893e8745 100644 --- a/template/interpolate/render.go +++ b/template/interpolate/render.go @@ -57,7 +57,7 @@ func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interfa // RenderInterface renders any value and returns the resulting value. 
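As the updated comments in `template/interpolate/i.go` above spell out, `Render` keeps re-interpolating until nothing changes (a variable's value can itself contain another variable), while the new `RenderOnce` does a single pass — and `RenderInterface` now uses the single-pass form. The difference is easiest to see with a toy fixed-point loop; the `${name}` placeholder syntax and the `renderOnce`/`render` helpers below are invented for the sketch and are not Packer's real template engine:

```go
package main

import (
	"fmt"
	"regexp"
)

// ref matches an invented ${name} placeholder, only to keep the sketch small.
var ref = regexp.MustCompile(`\$\{([a-z]+)\}`)

// renderOnce performs a single substitution pass over the input string.
func renderOnce(s string, vars map[string]string) string {
	return ref.ReplaceAllStringFunc(s, func(m string) string {
		name := ref.FindStringSubmatch(m)[1]
		if v, ok := vars[name]; ok {
			return v
		}
		return m
	})
}

// render repeats passes until the output stops changing, mirroring the
// comment above: a variable's value may itself reference another variable.
func render(s string, vars map[string]string) string {
	for {
		next := renderOnce(s, vars)
		if next == s {
			return next
		}
		s = next
	}
}

func main() {
	vars := map[string]string{
		"image":   "ubuntu-${version}",
		"version": "18.04",
	}
	fmt.Println(renderOnce("${image}", vars)) // ubuntu-${version}
	fmt.Println(render("${image}", vars))     // ubuntu-18.04
}
```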
func RenderInterface(v interface{}, ctx *Context) (interface{}, error) { f := func(v string) (string, error) { - return Render(v, ctx) + return RenderOnce(v, ctx) } walker := &renderWalker{ From ff1af40c661f7094ee70b7ed411a439509205b27 Mon Sep 17 00:00:00 2001 From: jhawk28 Date: Fri, 14 Feb 2020 02:50:03 -0500 Subject: [PATCH 45/61] struct-markdown should keep the same slash direction regardless of Windows or Linux (#8738) --- cmd/struct-markdown/main.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/struct-markdown/main.go b/cmd/struct-markdown/main.go index 77ba2cd9f..1474be1a9 100644 --- a/cmd/struct-markdown/main.go +++ b/cmd/struct-markdown/main.go @@ -60,19 +60,20 @@ func main() { } fields := structDecl.Fields.List + sourcePath := filepath.ToSlash(paths[1]) header := Struct{ - SourcePath: paths[1], + SourcePath: sourcePath, Name: typeSpec.Name.Name, Filename: "_" + typeSpec.Name.Name + ".html.md", Header: typeDecl.Doc.Text(), } required := Struct{ - SourcePath: paths[1], + SourcePath: sourcePath, Name: typeSpec.Name.Name, Filename: "_" + typeSpec.Name.Name + "-required.html.md", } notRequired := Struct{ - SourcePath: paths[1], + SourcePath: sourcePath, Name: typeSpec.Name.Name, Filename: "_" + typeSpec.Name.Name + "-not-required.html.md", } From 483c8ec7d0e651c6442ab0a71f3ff46803bf1d5f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 02:20:36 -0800 Subject: [PATCH 46/61] add go mod and go sum to gitattributes to fix line endings on windows (#8734) --- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitattributes b/.gitattributes index 490d8faf8..30f357a76 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5,4 +5,6 @@ *.md text eol=lf *.ps1 text eol=lf *.hcl text eol=lf +go.mod text eol=lf +go.sum text eol=lf common/test-fixtures/root/* eol=lf From 2d0c796837839199aa4fe2808e70615464d58347 Mon Sep 17 00:00:00 2001 From: Mark Lewis <56076038+ml4@users.noreply.github.com> Date: Fri, 14 Feb 2020 14:30:25 +0000 Subject: [PATCH 47/61] Fix typos in Input and Local Variables guide (#8741) --- website/source/guides/hcl/variables/index.html.md.erb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/guides/hcl/variables/index.html.md.erb b/website/source/guides/hcl/variables/index.html.md.erb index 3b82f8370..8ba9a4b1f 100644 --- a/website/source/guides/hcl/variables/index.html.md.erb +++ b/website/source/guides/hcl/variables/index.html.md.erb @@ -41,8 +41,8 @@ two have empty blocks `{}`. The third sets a default. If a default value is set, the variable is optional. Otherwise, the variable is required. This also defines two locals: `debian_ami_name` and `foo`. --> **Note**: that it is *not* possible to use variables in a variable defintion -but it *is* possible to use locals and variables in a local definintion. +-> **Note**: that it is *not* possible to use variables in a variable definition +but it *is* possible to use locals and variables in a local definition. 
## Using Variables and locals in Configuration From 19e7114301ad387661791f22369a32ad71dba0bc Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Tue, 11 Feb 2020 16:19:16 -0500 Subject: [PATCH 48/61] scripts/prepare_changelog: Update jq filter to ignore tech-debt labelled pull-requests --- scripts/prepare_changelog.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/prepare_changelog.sh b/scripts/prepare_changelog.sh index 74c8382e6..658e0dd06 100755 --- a/scripts/prepare_changelog.sh +++ b/scripts/prepare_changelog.sh @@ -12,7 +12,8 @@ is_doc_pr(){ return 1 fi PR_NUM=$1 - out=$(curl -fsS "https://api.github.com/repos/hashicorp/packer/issues/${PR_NUM}" | jq '[.labels[].name == "docs"] | any') + out=$(curl -fsS "https://api.github.com/repos/hashicorp/packer/issues/${PR_NUM}" \ + | jq '[.labels[].name == "docs" or .labels[].name == "tech-debt"] | any') exy="$?" if [ $exy -ne 0 ]; then echo "bad response from github" From 2bdca997acd6616ffdafeff04c8210ae77438503 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Wed, 12 Feb 2020 12:25:54 -0500 Subject: [PATCH 49/61] Update function name --- scripts/prepare_changelog.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/prepare_changelog.sh b/scripts/prepare_changelog.sh index 658e0dd06..87af3cac9 100755 --- a/scripts/prepare_changelog.sh +++ b/scripts/prepare_changelog.sh @@ -5,7 +5,7 @@ DO_PR_CHECK=1 set -o pipefail -is_doc_pr(){ +is_doc_or_tech_debt_pr(){ if ! (($+commands[jq])); then DO_PR_CHECK=0 echo "jq not found" @@ -39,16 +39,16 @@ get_prs(){ fi done | while read PR_NUM do - if (($DO_PR_CHECK)) && is_doc_pr $PR_NUM; then + if (($DO_PR_CHECK)) && is_doc_or_tech_debt_pr $PR_NUM; then continue fi echo "https://github.com/hashicorp/packer/pull/${PR_NUM}" done } -#is_doc_pr 52061111 -# is_doc_pr 5206 # non-doc pr -#is_doc_pr 5434 # doc pr +#is_doc_or_tech_debt_pr 52061111 +# is_doc_or_tech_debt_pr 5206 # non-doc pr +#is_doc_or_tech_debt_pr 5434 # doc pr #echo $? #exit From 7254b0412980a001b69f369b5aa5d01a18b135d3 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Fri, 14 Feb 2020 11:33:33 -0500 Subject: [PATCH 50/61] script/prepare_changelog: Update to show squashed merge commits (#8744) * script/prepare_changelog: Update regex to include squashed PRs * scripts/prepare_changelog: Update to show all commits not just merged commits --- scripts/prepare_changelog.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/prepare_changelog.sh b/scripts/prepare_changelog.sh index 87af3cac9..efd10e914 100755 --- a/scripts/prepare_changelog.sh +++ b/scripts/prepare_changelog.sh @@ -29,9 +29,9 @@ if [ -z $LAST_RELEASE ]; then fi get_prs(){ - # git log --merges v0.10.2...c3861d167533fb797b0fae0c380806625712e5f7 | - git log --merges HEAD...${LAST_RELEASE} | - grep -o "Merge pull request #\([0-9]\+\)" | awk -F\# '{print $2}' | while read line + # git log v0.10.2...c3861d167533fb797b0fae0c380806625712e5f7 | + git log HEAD...${LAST_RELEASE} | + grep -o "#\([0-9]\+\)" | awk -F\# '{print $2}' | while read line do grep -q "GH-${line}" CHANGELOG.md if [ $? -ne 0 ]; then From 2981fd627d8ea7ebe5314c362a0f37e17f91e1e4 Mon Sep 17 00:00:00 2001 From: Sylvia Moss Date: Fri, 14 Feb 2020 17:39:32 +0100 Subject: [PATCH 51/61] Avoid calling CoreBuild.Prepare(...) 
for HCL2 templates (#8742) --- hcl2template/types.build.post-processor.go | 4 ++-- hcl2template/types.build.provisioners.go | 19 +++------------ hcl2template/types.packer_config.go | 27 ++++++++++++++++++---- hcl2template/types.packer_config_test.go | 5 ++-- packer/build.go | 10 ++++++++ packer/build_test.go | 11 +++++++++ 6 files changed, 51 insertions(+), 25 deletions(-) diff --git a/hcl2template/types.build.post-processor.go b/hcl2template/types.build.post-processor.go index 1607d59b8..890f2d8d3 100644 --- a/hcl2template/types.build.post-processor.go +++ b/hcl2template/types.build.post-processor.go @@ -49,7 +49,7 @@ func (p *Parser) decodePostProcessor(block *hcl.Block) (*PostProcessorBlock, hcl return postProcessor, diags } -func (p *Parser) startPostProcessor(pp *PostProcessorBlock, ectx *hcl.EvalContext) (packer.PostProcessor, hcl.Diagnostics) { +func (p *Parser) startPostProcessor(pp *PostProcessorBlock, ectx *hcl.EvalContext, generatedVars map[string]string) (packer.PostProcessor, hcl.Diagnostics) { // ProvisionerBlock represents a detected but unparsed provisioner var diags hcl.Diagnostics @@ -64,7 +64,7 @@ func (p *Parser) startPostProcessor(pp *PostProcessorBlock, ectx *hcl.EvalContex } flatProvisinerCfg, moreDiags := decodeHCL2Spec(pp.Rest, ectx, postProcessor) diags = append(diags, moreDiags...) - err = postProcessor.Configure(flatProvisinerCfg) + err = postProcessor.Configure(flatProvisinerCfg, generatedVars) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, diff --git a/hcl2template/types.build.provisioners.go b/hcl2template/types.build.provisioners.go index d97b8c20c..862b13ca2 100644 --- a/hcl2template/types.build.provisioners.go +++ b/hcl2template/types.build.provisioners.go @@ -5,7 +5,6 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/packer/helper/common" "github.com/hashicorp/packer/packer" ) @@ -47,7 +46,7 @@ func (p *Parser) decodeProvisioner(block *hcl.Block) (*ProvisionerBlock, hcl.Dia return provisioner, diags } -func (p *Parser) startProvisioner(pb *ProvisionerBlock, ectx *hcl.EvalContext, generatedVars []string) (packer.Provisioner, hcl.Diagnostics) { +func (p *Parser) startProvisioner(pb *ProvisionerBlock, ectx *hcl.EvalContext, generatedVars map[string]string) (packer.Provisioner, hcl.Diagnostics) { var diags hcl.Diagnostics provisioner, err := p.ProvisionersSchemas.Start(pb.PType) @@ -67,22 +66,10 @@ func (p *Parser) startProvisioner(pb *ProvisionerBlock, ectx *hcl.EvalContext, g // manipulate generatedVars from builder to add to the interfaces being // passed to the provisioner Prepare() - // If the builder has provided a list of to-be-generated variables that - // should be made accessible to provisioners, pass that list into - // the provisioner prepare() so that the provisioner can appropriately - // validate user input against what will become available. Otherwise, - // only pass the default variables, using the basic placeholder data. - generatedPlaceholderMap := packer.BasicPlaceholderData() - if generatedVars != nil { - for _, k := range generatedVars { - generatedPlaceholderMap[k] = fmt.Sprintf("Generated_%s. 
"+ - common.PlaceholderMsg, k) - } - } // configs := make([]interface{}, 2) // configs = append(, flatProvisionerCfg) - // configs = append(configs, generatedPlaceholderMap) - err = provisioner.Prepare(flatProvisionerCfg, generatedPlaceholderMap) + // configs = append(configs, generatedVars) + err = provisioner.Prepare(flatProvisionerCfg, generatedVars) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, diff --git a/hcl2template/types.packer_config.go b/hcl2template/types.packer_config.go index 6e003d50a..4f02e2586 100644 --- a/hcl2template/types.packer_config.go +++ b/hcl2template/types.packer_config.go @@ -1,7 +1,9 @@ package hcl2template import ( + "fmt" "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/packer/helper/common" "github.com/hashicorp/packer/packer" "github.com/zclconf/go-cty/cty" ) @@ -42,7 +44,7 @@ func (cfg *PackerConfig) EvalContext() *hcl.EvalContext { // getCoreBuildProvisioners takes a list of provisioner block, starts according // provisioners and sends parsed HCL2 over to it. -func (p *Parser) getCoreBuildProvisioners(blocks []*ProvisionerBlock, ectx *hcl.EvalContext, generatedVars []string) ([]packer.CoreBuildProvisioner, hcl.Diagnostics) { +func (p *Parser) getCoreBuildProvisioners(blocks []*ProvisionerBlock, ectx *hcl.EvalContext, generatedVars map[string]string) ([]packer.CoreBuildProvisioner, hcl.Diagnostics) { var diags hcl.Diagnostics res := []packer.CoreBuildProvisioner{} for _, pb := range blocks { @@ -62,11 +64,11 @@ func (p *Parser) getCoreBuildProvisioners(blocks []*ProvisionerBlock, ectx *hcl. // getCoreBuildProvisioners takes a list of post processor block, starts // according provisioners and sends parsed HCL2 over to it. -func (p *Parser) getCoreBuildPostProcessors(blocks []*PostProcessorBlock, ectx *hcl.EvalContext) ([]packer.CoreBuildPostProcessor, hcl.Diagnostics) { +func (p *Parser) getCoreBuildPostProcessors(blocks []*PostProcessorBlock, ectx *hcl.EvalContext, generatedVars map[string]string) ([]packer.CoreBuildPostProcessor, hcl.Diagnostics) { var diags hcl.Diagnostics res := []packer.CoreBuildPostProcessor{} for _, ppb := range blocks { - postProcessor, moreDiags := p.startPostProcessor(ppb, ectx) + postProcessor, moreDiags := p.startPostProcessor(ppb, ectx, generatedVars) diags = append(diags, moreDiags...) if moreDiags.HasErrors() { continue @@ -104,12 +106,26 @@ func (p *Parser) getBuilds(cfg *PackerConfig) ([]packer.Build, hcl.Diagnostics) if moreDiags.HasErrors() { continue } - provisioners, moreDiags := p.getCoreBuildProvisioners(build.ProvisionerBlocks, cfg.EvalContext(), generatedVars) + + // If the builder has provided a list of to-be-generated variables that + // should be made accessible to provisioners, pass that list into + // the provisioner prepare() so that the provisioner can appropriately + // validate user input against what will become available. Otherwise, + // only pass the default variables, using the basic placeholder data. + generatedPlaceholderMap := packer.BasicPlaceholderData() + if generatedVars != nil { + for _, k := range generatedVars { + generatedPlaceholderMap[k] = fmt.Sprintf("Build_%s. "+ + common.PlaceholderMsg, k) + } + } + + provisioners, moreDiags := p.getCoreBuildProvisioners(build.ProvisionerBlocks, cfg.EvalContext(), generatedPlaceholderMap) diags = append(diags, moreDiags...) 
if moreDiags.HasErrors() { continue } - postProcessors, moreDiags := p.getCoreBuildPostProcessors(build.PostProcessors, cfg.EvalContext()) + postProcessors, moreDiags := p.getCoreBuildPostProcessors(build.PostProcessors, cfg.EvalContext(), generatedPlaceholderMap) pps := [][]packer.CoreBuildPostProcessor{} if len(postProcessors) > 0 { pps = [][]packer.CoreBuildPostProcessor{postProcessors} @@ -124,6 +140,7 @@ func (p *Parser) getBuilds(cfg *PackerConfig) ([]packer.Build, hcl.Diagnostics) Builder: builder, Provisioners: provisioners, PostProcessors: pps, + Prepared: true, } res = append(res, pcb) } diff --git a/hcl2template/types.packer_config_test.go b/hcl2template/types.packer_config_test.go index 38fed400a..b76133e0e 100644 --- a/hcl2template/types.packer_config_test.go +++ b/hcl2template/types.packer_config_test.go @@ -57,8 +57,9 @@ func TestParser_complete(t *testing.T) { false, false, []packer.Build{ &packer.CoreBuild{ - Type: "virtualbox-iso", - Builder: basicMockBuilder, + Type: "virtualbox-iso", + Prepared: true, + Builder: basicMockBuilder, Provisioners: []packer.CoreBuildProvisioner{ { PType: "shell", diff --git a/packer/build.go b/packer/build.go index 56a69b609..007c6ed3b 100644 --- a/packer/build.go +++ b/packer/build.go @@ -97,6 +97,9 @@ type CoreBuild struct { TemplatePath string Variables map[string]string + // Indicates whether the build is already initialized before calling Prepare(..) + Prepared bool + debug bool force bool onError string @@ -132,6 +135,13 @@ func (b *CoreBuild) Name() string { // and any hooks. This _must_ be called prior to Run. The parameter is the // overrides for the variables within the template (if any). func (b *CoreBuild) Prepare() (warn []string, err error) { + // For HCL2 templates, the builder and hooks are initialized when the template is parsed. + // Calling Prepare(...) is not necessary + if b.Prepared { + b.prepareCalled = true + return + } + b.l.Lock() defer b.l.Unlock() diff --git a/packer/build_test.go b/packer/build_test.go index e285554a6..dc1d698b6 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -88,6 +88,17 @@ func TestBuild_Prepare(t *testing.T) { } } +func TestBuild_Prepare_SkipWhenBuilderAlreadyInitialized(t *testing.T) { + build := testBuild() + builder := build.Builder.(*MockBuilder) + + build.Prepared = true + build.Prepare() + if builder.PrepareCalled { + t.Fatal("should not be called") + } +} + func TestBuild_Prepare_Twice(t *testing.T) { build := testBuild() warn, err := build.Prepare() From 9ec8b67392a8e458d802d8245ddb1fc9efea7b51 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Fri, 14 Feb 2020 11:42:29 -0500 Subject: [PATCH 52/61] Add golangci-lint to project (#8686) * Add golangci-lint as linting tool * Disable failing staticchecks to start; GitHub issue to handle coming soon * Run `goimports -w` to repair all source files that have improperly formatted imports * makefile: Add ci-lint target to run on travis This change adds a new make target for running golangci-lint on newly added Go files only. This target is expected to run during Packer ci builds. 
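PATCH 51 above moves construction of the generated-variable placeholder data up into `getBuilds`, so provisioners and post-processors are configured against the same map during their prepare/configure step. The shape of that map is simple to sketch in isolation — the default keys and the `placeholderMsg` wording below are illustrative, not the exact values of `packer.BasicPlaceholderData()` or `common.PlaceholderMsg`:

```go
package main

import "fmt"

// placeholderMsg stands in for helper/common.PlaceholderMsg; the exact
// wording is not important for the sketch.
const placeholderMsg = "This value will only be available at build time."

// buildPlaceholderData mirrors the idea in the patch above: start from the
// variables every build exposes, then add one entry per builder-generated
// variable so Prepare()/Configure() can validate references such as
// {{ build `ID` }} before the real values exist. The default keys below
// are illustrative, not Packer's exact list.
func buildPlaceholderData(generated []string) map[string]string {
	data := map[string]string{
		"ID":   "Build_ID. " + placeholderMsg,
		"Host": "Build_Host. " + placeholderMsg,
	}
	for _, k := range generated {
		data[k] = fmt.Sprintf("Build_%s. "+placeholderMsg, k)
	}
	return data
}

func main() {
	fmt.Println(buildPlaceholderData([]string{"SourceAMIName"})["SourceAMIName"])
}
```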
* .github/contributing: Add code linting instructions * travis: Update job configuration to run parallel builds --- .github/CONTRIBUTING.md | 22 ++++ .golangci.yml | 124 ++++++++++++++++++ .travis.yml | 21 +-- Makefile | 23 +++- builder/amazon/chroot/builder.go | 2 +- builder/amazon/chroot/step_mount_device.go | 2 +- builder/amazon/chroot/step_prepare_device.go | 2 +- .../common/interpolate_build_info_test.go | 2 +- .../common/step_modify_ami_attributes.go | 1 + builder/amazon/ebs/builder.go | 1 + builder/amazon/ebssurrogate/builder.go | 1 + builder/amazon/ebssurrogate/builder_test.go | 3 +- builder/amazon/instance/builder.go | 2 +- builder/generated_data_test.go | 3 +- builder/lxc/step_provision.go | 2 +- builder/lxd/step_provision.go | 2 +- builder/osc/chroot/step_chroot_provision.go | 2 +- builder/parallels/common/ssh_config.go | 3 +- builder/parallels/common/ssh_config_test.go | 5 +- builder/qemu/step_http_ip_discover.go | 1 + builder/qemu/step_http_ip_discover_test.go | 3 +- .../vmware/common/step_http_ip_discover.go | 3 +- .../common/step_http_ip_discover_test.go | 3 +- builder/vsphere/clone/builder.go | 1 + builder/vsphere/clone/builder_acc_test.go | 5 +- builder/vsphere/clone/builder_test.go | 3 +- builder/vsphere/clone/step_clone.go | 1 + builder/vsphere/common/step_config_params.go | 1 + builder/vsphere/common/step_connect.go | 1 + builder/vsphere/common/step_hardware.go | 1 + builder/vsphere/common/step_run.go | 3 +- builder/vsphere/common/step_shutdown.go | 5 +- builder/vsphere/common/step_snapshot.go | 1 + builder/vsphere/common/step_template.go | 1 + builder/vsphere/common/step_wait_for_ip.go | 5 +- builder/vsphere/common/testing/utility.go | 7 +- builder/vsphere/driver/datastore.go | 1 + builder/vsphere/driver/driver.go | 5 +- builder/vsphere/driver/folder.go | 1 + builder/vsphere/driver/resource_pool.go | 1 + builder/vsphere/driver/vm.go | 7 +- builder/vsphere/driver/vm_cdrom.go | 1 + builder/vsphere/driver/vm_keyboard.go | 5 +- builder/vsphere/examples/driver/main.go | 1 + builder/vsphere/iso/builder_acc_test.go | 7 +- builder/vsphere/iso/step_add_cdrom.go | 1 + builder/vsphere/iso/step_add_floppy.go | 1 + builder/vsphere/iso/step_boot_command.go | 13 +- builder/vsphere/iso/step_remote_upload.go | 3 +- builder/vsphere/iso/step_remove_cdrom.go | 1 + builder/vsphere/iso/step_remove_floppy.go | 1 + command/build_parallel_test.go | 3 +- common/shell-local/config.go | 1 - ...r_amazon_temporary_security_group_cidrs.go | 3 +- helper/ssh/tunnel_test.go | 3 +- plugin/example/main.go | 2 +- post-processor/vagrant/util.go | 2 +- template/interpolate/i.go | 3 +- 58 files changed, 271 insertions(+), 63 deletions(-) create mode 100644 .golangci.yml diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 33f9c3f64..930ddba4a 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -259,6 +259,28 @@ localized code generation. Say you are working on the Amazon builder: running `go generate ./builder/amazon/...` will do that for you. Make sure that the latest code generation tool is installed by running `make install-gen-deps`. +#### Code linting + +Packer relies on [golangci-lint](https://github.com/golangci/golangci-lint) for linting its Go code base, excluding any generated code created by `go generate`. Linting is executed on new files during Travis builds via `make ci`; the linting of existing code base is only executed when running `make lint`. 
Linting a large project like Packer is an iterative process so existing code base will have issues that are actively being fixed; pull-requests that fix existing linting issues are always welcomed :smile:. + +The main configuration for golangci-lint is the `.golangci.yml` in the project root. See `golangci-lint --help` for a list of flags that can be used to override the default configuration. + +Run golangci-lint on the entire Packer code base. +``` +make lint +``` + +Run golangci-lint on a single pkg or directory; PKG_NAME expands to /builder/amazon/... +``` +make lint PKG_NAME=builder/amazon +``` + +Note: linting on Travis uses the `--new-from-rev=origin/master` flag to only lint new files added within a branch or pull-request. To run this check locally you can use the `ci-lint` make target. See [golangci-lint in CI](https://github.com/golangci/golangci-lint#faq) for more information. + +``` +make ci-lint +``` + #### Running Unit Tests You can run tests for individual packages using commands like this: diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..a6e07e91f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,124 @@ +issues: + # List of regexps of issue texts to exclude, empty list by default. + # But independently from this option we use default exclude patterns, + # it can be disabled by `exclude-use-default: false`. To list all + # excluded by default patterns execute `golangci-lint run --help` + + exclude-rules: + # Exclude gosimple bool check + - linters: + - gosimple + text: "S(1002|1008|1021)" + # Exclude failing staticchecks for now + - linters: + - staticcheck + text: "SA(1006|1019|4006|4010|4017|5007|6005|9004):" + # Exclude lll issues for long lines with go:generate + - linters: + - lll + source: "^//go:generate " + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - goimports + - gosimple + - govet + - ineffassign + - staticcheck + - unconvert + - unused + - varcheck + fast: true + +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 10m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: true + + # list of build tags, all linters use it. Default is empty list. + #build-tags: + # - mytag + + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + #skip-dirs: + # - src/external_libs + # - autogenerated_by_my_lib + + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: true + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + skip-files: + - ".*\\.hcl2spec\\.go$" + # - lib/bad.go + + # by default isn't set. If set we pass it to "go list -mod={option}". 
From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + modules-download-mode: vendor + + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + + # make issues output unique by line, default is true + uniq-by-line: true + + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + # [deprecated] comma-separated list of pairs of the form pkg:regex + # the regex is used to ignore names within pkg. (default "fmt:.*"). + # see https://github.com/kisielk/errcheck#the-deprecated-method for details + ignore: fmt:.*,io/ioutil:^Read.*,io:Close + + # path to a file containing a list of functions to exclude from checking + # see https://github.com/kisielk/errcheck#excluding-functions for details + #exclude: /path/to/file.txt diff --git a/.travis.yml b/.travis.yml index 83218bf8b..987dcdc71 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,20 +4,21 @@ env: os: - osx -sudo: false - language: go -go: - - 1.13.x - -script: - - df -h - - travis_wait make ci - branches: only: - master -matrix: +jobs: fast_finish: true + include: + - go: "1.13.x" + name: "go test" + script: + - df -h + - travis_wait make ci + - go: "1.13.x" + name: "go lint" + script: travis_wait make ci-lint + diff --git a/Makefile b/Makefile index 888648c25..7a5fd9fb8 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,8 @@ GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) export GOLDFLAGS -.PHONY: bin checkversion ci default install-build-deps install-gen-deps fmt fmt-docs fmt-examples generate releasebin test testacc testrace +.PHONY: bin checkversion ci ci-lint default install-build-deps install-gen-deps fmt fmt-docs fmt-examples generate install-lint-deps lint \ + releasebin test testacc testrace default: install-build-deps install-gen-deps generate testrace dev releasebin package dev fmt fmt-check mode-check fmt-docs fmt-examples @@ -49,12 +50,16 @@ install-gen-deps: ## Install dependencies for code generation # dir. `go get` will change our deps and the following deps are not part of # out code dependencies; so a go mod tidy will remove them again. `go # install` seems to install the last tagged version and we want to install - # master. + # master. 
@(cd $(TEMPDIR) && GO111MODULE=on go get github.com/mna/pigeon@master) @(cd $(TEMPDIR) && GO111MODULE=on go get github.com/alvaroloes/enumer@master) @go install ./cmd/struct-markdown @go install ./cmd/mapstructure-to-hcl2 +install-lint-deps: ## Install linter dependencies + @echo "==> Updating linter dependencies..." + @curl -sSfL -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.23.1 + dev: ## Build and install a development build @grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \ echo "ERROR: You must add prerelease tags to version/version.go prior to making a dev build."; \ @@ -66,6 +71,20 @@ dev: ## Build and install a development build @cp $(GOPATH)/bin/packer bin/packer @cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH) +lint: install-lint-deps ## Lint Go code + @if [ ! -z $(PKG_NAME) ]; then \ + echo "golangci-lint run ./$(PKG_NAME)/..."; \ + golangci-lint run ./$(PKG_NAME)/...; \ + else \ + echo "golangci-lint run"; \ + golangci-lint run; \ + fi + +ci-lint: install-lint-deps ## On ci only lint newly added Go source files + @echo "==> Running linter on newly added Go source files..." + GO111MODULE=on golangci-lint run --new-from-rev=origin/master + + fmt: ## Format Go code @go fmt ./... diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 7b1fd70b9..ed60b5e95 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -10,11 +10,11 @@ package chroot import ( "context" "errors" - "github.com/hashicorp/packer/builder" "runtime" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/packer/builder" awscommon "github.com/hashicorp/packer/builder/amazon/common" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/chroot" diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index 262789620..dffd35854 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -4,13 +4,13 @@ import ( "bytes" "context" "fmt" - "github.com/hashicorp/packer/builder" "log" "os" "path/filepath" "strings" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/packer/builder" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/amazon/chroot/step_prepare_device.go b/builder/amazon/chroot/step_prepare_device.go index cbc40d0f6..098d83858 100644 --- a/builder/amazon/chroot/step_prepare_device.go +++ b/builder/amazon/chroot/step_prepare_device.go @@ -3,10 +3,10 @@ package chroot import ( "context" "fmt" - "github.com/hashicorp/packer/builder" "log" "os" + "github.com/hashicorp/packer/builder" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) diff --git a/builder/amazon/common/interpolate_build_info_test.go b/builder/amazon/common/interpolate_build_info_test.go index 7f72f82ea..e85e167d6 100644 --- a/builder/amazon/common/interpolate_build_info_test.go +++ b/builder/amazon/common/interpolate_build_info_test.go @@ -1,12 +1,12 @@ package common import ( - "github.com/hashicorp/packer/builder" "reflect" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/packer/builder" "github.com/hashicorp/packer/helper/multistep" ) diff --git a/builder/amazon/common/step_modify_ami_attributes.go 
b/builder/amazon/common/step_modify_ami_attributes.go index ba34037af..057650a51 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -3,6 +3,7 @@ package common import ( "context" "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index a76454616..6d3e7f89a 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -11,6 +11,7 @@ package ebs import ( "context" "fmt" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/hcl/v2/hcldec" diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go index 6007d3fa8..1a94b7c28 100644 --- a/builder/amazon/ebssurrogate/builder.go +++ b/builder/amazon/ebssurrogate/builder.go @@ -9,6 +9,7 @@ import ( "context" "errors" "fmt" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/hcl/v2/hcldec" diff --git a/builder/amazon/ebssurrogate/builder_test.go b/builder/amazon/ebssurrogate/builder_test.go index 9f914de28..663ea422c 100644 --- a/builder/amazon/ebssurrogate/builder_test.go +++ b/builder/amazon/ebssurrogate/builder_test.go @@ -1,9 +1,10 @@ package ebssurrogate import ( - "github.com/hashicorp/packer/builder/amazon/common" "testing" + "github.com/hashicorp/packer/builder/amazon/common" + "github.com/hashicorp/packer/packer" ) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 9fc9d3d30..763960150 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -9,13 +9,13 @@ import ( "context" "errors" "fmt" - "github.com/hashicorp/packer/builder" "os" "strings" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/packer/builder" awscommon "github.com/hashicorp/packer/builder/amazon/common" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/communicator" diff --git a/builder/generated_data_test.go b/builder/generated_data_test.go index 998569237..09f673cc1 100644 --- a/builder/generated_data_test.go +++ b/builder/generated_data_test.go @@ -1,8 +1,9 @@ package builder import ( - "github.com/hashicorp/packer/helper/multistep" "testing" + + "github.com/hashicorp/packer/helper/multistep" ) func TestGeneratedData_Put(t *testing.T) { diff --git a/builder/lxc/step_provision.go b/builder/lxc/step_provision.go index e1ba2874d..fe9f3264b 100644 --- a/builder/lxc/step_provision.go +++ b/builder/lxc/step_provision.go @@ -2,9 +2,9 @@ package lxc import ( "context" - "github.com/hashicorp/packer/common" "log" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) diff --git a/builder/lxd/step_provision.go b/builder/lxd/step_provision.go index ec6da8a22..79f43b21a 100644 --- a/builder/lxd/step_provision.go +++ b/builder/lxd/step_provision.go @@ -2,9 +2,9 @@ package lxd import ( "context" - "github.com/hashicorp/packer/common" "log" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) diff --git a/builder/osc/chroot/step_chroot_provision.go b/builder/osc/chroot/step_chroot_provision.go index 25afd6edd..e621f2fb6 100644 --- a/builder/osc/chroot/step_chroot_provision.go +++ 
b/builder/osc/chroot/step_chroot_provision.go @@ -2,9 +2,9 @@ package chroot import ( "context" - "github.com/hashicorp/packer/common" "log" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" ) diff --git a/builder/parallels/common/ssh_config.go b/builder/parallels/common/ssh_config.go index c8b4d35f0..0c75be360 100644 --- a/builder/parallels/common/ssh_config.go +++ b/builder/parallels/common/ssh_config.go @@ -1,9 +1,10 @@ package common import ( + "time" + "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/template/interpolate" - "time" ) // SSHConfig contains the configuration for SSH communicator. diff --git a/builder/parallels/common/ssh_config_test.go b/builder/parallels/common/ssh_config_test.go index f49cafb97..f5adb0bd9 100644 --- a/builder/parallels/common/ssh_config_test.go +++ b/builder/parallels/common/ssh_config_test.go @@ -1,12 +1,13 @@ package common import ( - "github.com/hashicorp/packer/helper/communicator" - "github.com/hashicorp/packer/template/interpolate" "io/ioutil" "os" "testing" "time" + + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/template/interpolate" ) func testSSHConfig() *SSHConfig { diff --git a/builder/qemu/step_http_ip_discover.go b/builder/qemu/step_http_ip_discover.go index d8cbb2250..a20fb4886 100644 --- a/builder/qemu/step_http_ip_discover.go +++ b/builder/qemu/step_http_ip_discover.go @@ -2,6 +2,7 @@ package qemu import ( "context" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" ) diff --git a/builder/qemu/step_http_ip_discover_test.go b/builder/qemu/step_http_ip_discover_test.go index 9ee510b17..32c737ef7 100644 --- a/builder/qemu/step_http_ip_discover_test.go +++ b/builder/qemu/step_http_ip_discover_test.go @@ -2,9 +2,10 @@ package qemu import ( "context" + "testing" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" - "testing" ) func TestStepHTTPIPDiscover_Run(t *testing.T) { diff --git a/builder/vmware/common/step_http_ip_discover.go b/builder/vmware/common/step_http_ip_discover.go index 8d813480f..8a192a62c 100644 --- a/builder/vmware/common/step_http_ip_discover.go +++ b/builder/vmware/common/step_http_ip_discover.go @@ -3,10 +3,11 @@ package common import ( "context" "fmt" + "log" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "log" ) // Step to discover the http ip diff --git a/builder/vmware/common/step_http_ip_discover_test.go b/builder/vmware/common/step_http_ip_discover_test.go index 724da984b..ee147b8a2 100644 --- a/builder/vmware/common/step_http_ip_discover_test.go +++ b/builder/vmware/common/step_http_ip_discover_test.go @@ -3,9 +3,10 @@ package common import ( "context" "errors" + "testing" + "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/multistep" - "testing" ) func TestStepHTTPIPDiscover_Run(t *testing.T) { diff --git a/builder/vsphere/clone/builder.go b/builder/vsphere/clone/builder.go index bb3f9c574..a74bb4559 100644 --- a/builder/vsphere/clone/builder.go +++ b/builder/vsphere/clone/builder.go @@ -2,6 +2,7 @@ package clone import ( "context" + "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/packer/builder/vsphere/common" "github.com/hashicorp/packer/builder/vsphere/driver" diff --git a/builder/vsphere/clone/builder_acc_test.go b/builder/vsphere/clone/builder_acc_test.go index ec988ed83..22bb345ab 100644 --- 
a/builder/vsphere/clone/builder_acc_test.go +++ b/builder/vsphere/clone/builder_acc_test.go @@ -1,13 +1,14 @@ package clone import ( + "os" + "testing" + "github.com/hashicorp/packer/builder/vsphere/common" commonT "github.com/hashicorp/packer/builder/vsphere/common/testing" builderT "github.com/hashicorp/packer/helper/builder/testing" "github.com/hashicorp/packer/packer" "github.com/vmware/govmomi/vim25/types" - "os" - "testing" ) func TestCloneBuilderAcc_default(t *testing.T) { diff --git a/builder/vsphere/clone/builder_test.go b/builder/vsphere/clone/builder_test.go index 788e0e245..5f57eb7ad 100644 --- a/builder/vsphere/clone/builder_test.go +++ b/builder/vsphere/clone/builder_test.go @@ -1,8 +1,9 @@ package clone import ( - "github.com/hashicorp/packer/packer" "testing" + + "github.com/hashicorp/packer/packer" ) func TestCloneBuilder_ImplementsBuilder(t *testing.T) { diff --git a/builder/vsphere/clone/step_clone.go b/builder/vsphere/clone/step_clone.go index 3c7ffb0c3..22c3059c7 100644 --- a/builder/vsphere/clone/step_clone.go +++ b/builder/vsphere/clone/step_clone.go @@ -6,6 +6,7 @@ package clone import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/common" "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" diff --git a/builder/vsphere/common/step_config_params.go b/builder/vsphere/common/step_config_params.go index ba81194da..65aa91789 100644 --- a/builder/vsphere/common/step_config_params.go +++ b/builder/vsphere/common/step_config_params.go @@ -6,6 +6,7 @@ package common import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/common/step_connect.go b/builder/vsphere/common/step_connect.go index 85136a8d4..b651eda06 100644 --- a/builder/vsphere/common/step_connect.go +++ b/builder/vsphere/common/step_connect.go @@ -6,6 +6,7 @@ package common import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" ) diff --git a/builder/vsphere/common/step_hardware.go b/builder/vsphere/common/step_hardware.go index 20d67d080..e7025208a 100644 --- a/builder/vsphere/common/step_hardware.go +++ b/builder/vsphere/common/step_hardware.go @@ -6,6 +6,7 @@ package common import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/common/step_run.go b/builder/vsphere/common/step_run.go index 34fcc0c77..e18c4ee4a 100644 --- a/builder/vsphere/common/step_run.go +++ b/builder/vsphere/common/step_run.go @@ -5,10 +5,11 @@ package common import ( "context" + "strings" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "strings" ) type RunConfig struct { diff --git a/builder/vsphere/common/step_shutdown.go b/builder/vsphere/common/step_shutdown.go index c2124e84d..98142d421 100644 --- a/builder/vsphere/common/step_shutdown.go +++ b/builder/vsphere/common/step_shutdown.go @@ -7,11 +7,12 @@ import ( "bytes" "context" "fmt" + "log" + "time" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "log" - "time" ) type ShutdownConfig struct { diff --git a/builder/vsphere/common/step_snapshot.go b/builder/vsphere/common/step_snapshot.go index 
4f7cd7604..c577cf035 100644 --- a/builder/vsphere/common/step_snapshot.go +++ b/builder/vsphere/common/step_snapshot.go @@ -2,6 +2,7 @@ package common import ( "context" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/common/step_template.go b/builder/vsphere/common/step_template.go index 42a2573e1..fad2b9470 100644 --- a/builder/vsphere/common/step_template.go +++ b/builder/vsphere/common/step_template.go @@ -2,6 +2,7 @@ package common import ( "context" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/common/step_wait_for_ip.go b/builder/vsphere/common/step_wait_for_ip.go index 51593b1f2..d5f0db772 100644 --- a/builder/vsphere/common/step_wait_for_ip.go +++ b/builder/vsphere/common/step_wait_for_ip.go @@ -6,11 +6,12 @@ package common import ( "context" "fmt" + "log" + "time" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "log" - "time" ) type WaitIpConfig struct { diff --git a/builder/vsphere/common/testing/utility.go b/builder/vsphere/common/testing/utility.go index 22a0d2e86..af10c4b10 100644 --- a/builder/vsphere/common/testing/utility.go +++ b/builder/vsphere/common/testing/utility.go @@ -3,13 +3,14 @@ package testing import ( "encoding/json" "fmt" - "github.com/hashicorp/packer/builder/vsphere/common" - "github.com/hashicorp/packer/builder/vsphere/driver" - "github.com/hashicorp/packer/packer" "math/rand" "os" "testing" "time" + + "github.com/hashicorp/packer/builder/vsphere/common" + "github.com/hashicorp/packer/builder/vsphere/driver" + "github.com/hashicorp/packer/packer" ) func NewVMName() string { diff --git a/builder/vsphere/driver/datastore.go b/builder/vsphere/driver/datastore.go index e553e9c3c..6a87f9a36 100644 --- a/builder/vsphere/driver/datastore.go +++ b/builder/vsphere/driver/datastore.go @@ -2,6 +2,7 @@ package driver import ( "fmt" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" diff --git a/builder/vsphere/driver/driver.go b/builder/vsphere/driver/driver.go index e93fbe47b..8c8cca5bd 100644 --- a/builder/vsphere/driver/driver.go +++ b/builder/vsphere/driver/driver.go @@ -3,14 +3,15 @@ package driver import ( "context" "fmt" + "net/url" + "time" + "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" - "net/url" - "time" ) type Driver struct { diff --git a/builder/vsphere/driver/folder.go b/builder/vsphere/driver/folder.go index 7fdf40ed1..6349a0e04 100644 --- a/builder/vsphere/driver/folder.go +++ b/builder/vsphere/driver/folder.go @@ -2,6 +2,7 @@ package driver import ( "fmt" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" diff --git a/builder/vsphere/driver/resource_pool.go b/builder/vsphere/driver/resource_pool.go index 001e836d7..c81f1c23f 100644 --- a/builder/vsphere/driver/resource_pool.go +++ b/builder/vsphere/driver/resource_pool.go @@ -2,6 +2,7 @@ package driver import ( "fmt" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" diff --git a/builder/vsphere/driver/vm.go 
b/builder/vsphere/driver/vm.go index d177b20ba..06edbdfbd 100644 --- a/builder/vsphere/driver/vm.go +++ b/builder/vsphere/driver/vm.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/types" "log" "strings" "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" ) type VirtualMachine struct { diff --git a/builder/vsphere/driver/vm_cdrom.go b/builder/vsphere/driver/vm_cdrom.go index d33b75829..264e87d10 100644 --- a/builder/vsphere/driver/vm_cdrom.go +++ b/builder/vsphere/driver/vm_cdrom.go @@ -2,6 +2,7 @@ package driver import ( "errors" + "github.com/vmware/govmomi/vim25/types" ) diff --git a/builder/vsphere/driver/vm_keyboard.go b/builder/vsphere/driver/vm_keyboard.go index 481639fc1..616ec958c 100644 --- a/builder/vsphere/driver/vm_keyboard.go +++ b/builder/vsphere/driver/vm_keyboard.go @@ -1,11 +1,12 @@ package driver import ( + "strings" + "unicode" + "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" "golang.org/x/mobile/event/key" - "strings" - "unicode" ) type KeyInput struct { diff --git a/builder/vsphere/examples/driver/main.go b/builder/vsphere/examples/driver/main.go index 486304158..3f0235e9e 100644 --- a/builder/vsphere/examples/driver/main.go +++ b/builder/vsphere/examples/driver/main.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" ) diff --git a/builder/vsphere/iso/builder_acc_test.go b/builder/vsphere/iso/builder_acc_test.go index 0857d53ec..56de2818d 100644 --- a/builder/vsphere/iso/builder_acc_test.go +++ b/builder/vsphere/iso/builder_acc_test.go @@ -2,13 +2,14 @@ package iso import ( "fmt" + "io/ioutil" + "os" + "testing" + commonT "github.com/hashicorp/packer/builder/vsphere/common/testing" builderT "github.com/hashicorp/packer/helper/builder/testing" "github.com/hashicorp/packer/packer" "github.com/vmware/govmomi/vim25/types" - "io/ioutil" - "os" - "testing" ) func TestISOBuilderAcc_default(t *testing.T) { diff --git a/builder/vsphere/iso/step_add_cdrom.go b/builder/vsphere/iso/step_add_cdrom.go index f5742c0f8..068d9ce4c 100644 --- a/builder/vsphere/iso/step_add_cdrom.go +++ b/builder/vsphere/iso/step_add_cdrom.go @@ -6,6 +6,7 @@ package iso import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/iso/step_add_floppy.go b/builder/vsphere/iso/step_add_floppy.go index 51e987a63..caefff19d 100644 --- a/builder/vsphere/iso/step_add_floppy.go +++ b/builder/vsphere/iso/step_add_floppy.go @@ -6,6 +6,7 @@ package iso import ( "context" "fmt" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/iso/step_boot_command.go b/builder/vsphere/iso/step_boot_command.go index cb1154fbd..316325814 100644 --- a/builder/vsphere/iso/step_boot_command.go +++ b/builder/vsphere/iso/step_boot_command.go @@ -3,18 +3,19 @@ package iso import ( "context" "fmt" - "github.com/hashicorp/packer/builder/vsphere/driver" - packerCommon "github.com/hashicorp/packer/common" - "github.com/hashicorp/packer/helper/multistep" - "github.com/hashicorp/packer/packer" - "github.com/hashicorp/packer/template/interpolate" - "golang.org/x/mobile/event/key" "log" "net" "os" "strings" "time" "unicode/utf8" 
+ + "github.com/hashicorp/packer/builder/vsphere/driver" + packerCommon "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" + "golang.org/x/mobile/event/key" ) type BootConfig struct { diff --git a/builder/vsphere/iso/step_remote_upload.go b/builder/vsphere/iso/step_remote_upload.go index ac395a66b..9eca285dc 100644 --- a/builder/vsphere/iso/step_remote_upload.go +++ b/builder/vsphere/iso/step_remote_upload.go @@ -3,10 +3,11 @@ package iso import ( "context" "fmt" + "path/filepath" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "path/filepath" ) type StepRemoteUpload struct { diff --git a/builder/vsphere/iso/step_remove_cdrom.go b/builder/vsphere/iso/step_remove_cdrom.go index 508676575..de943e675 100644 --- a/builder/vsphere/iso/step_remove_cdrom.go +++ b/builder/vsphere/iso/step_remove_cdrom.go @@ -5,6 +5,7 @@ package iso import ( "context" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/builder/vsphere/iso/step_remove_floppy.go b/builder/vsphere/iso/step_remove_floppy.go index 3844b8063..221ff9659 100644 --- a/builder/vsphere/iso/step_remove_floppy.go +++ b/builder/vsphere/iso/step_remove_floppy.go @@ -2,6 +2,7 @@ package iso import ( "context" + "github.com/hashicorp/packer/builder/vsphere/driver" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/command/build_parallel_test.go b/command/build_parallel_test.go index b6e4ceb8c..d4802de43 100644 --- a/command/build_parallel_test.go +++ b/command/build_parallel_test.go @@ -4,11 +4,12 @@ import ( "bytes" "context" "fmt" - "github.com/hashicorp/hcl/v2/hcldec" "path/filepath" "sync" "testing" + "github.com/hashicorp/hcl/v2/hcldec" + "golang.org/x/sync/errgroup" "github.com/hashicorp/packer/builder/file" diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 37c2605f8..cc819ae30 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -5,7 +5,6 @@ package shell_local import ( "errors" "fmt" - // "log" "os" "path/filepath" "runtime" diff --git a/fix/fixer_amazon_temporary_security_group_cidrs.go b/fix/fixer_amazon_temporary_security_group_cidrs.go index c48a28011..0a420aecb 100644 --- a/fix/fixer_amazon_temporary_security_group_cidrs.go +++ b/fix/fixer_amazon_temporary_security_group_cidrs.go @@ -1,8 +1,9 @@ package fix import ( - "github.com/mitchellh/mapstructure" "strings" + + "github.com/mitchellh/mapstructure" ) type FixerAmazonTemporarySecurityCIDRs struct{} diff --git a/helper/ssh/tunnel_test.go b/helper/ssh/tunnel_test.go index 79c015e84..447a71ef3 100644 --- a/helper/ssh/tunnel_test.go +++ b/helper/ssh/tunnel_test.go @@ -1,8 +1,9 @@ package ssh import ( - "github.com/hashicorp/packer/communicator/ssh" "testing" + + "github.com/hashicorp/packer/communicator/ssh" ) const ( diff --git a/plugin/example/main.go b/plugin/example/main.go index 7f4edf6c4..416728260 100644 --- a/plugin/example/main.go +++ b/plugin/example/main.go @@ -17,7 +17,7 @@ package main import ( "github.com/hashicorp/packer/builder/amazon/chroot" "github.com/hashicorp/packer/packer/plugin" - "github.com/hashicorp/packer/post-processor/docker-push" + dockerpush "github.com/hashicorp/packer/post-processor/docker-push" 
"github.com/hashicorp/packer/provisioner/powershell" ) diff --git a/post-processor/vagrant/util.go b/post-processor/vagrant/util.go index 476136122..21f3398eb 100644 --- a/post-processor/vagrant/util.go +++ b/post-processor/vagrant/util.go @@ -5,7 +5,6 @@ import ( "compress/flate" "encoding/json" "fmt" - "github.com/hashicorp/packer/packer/tmp" "io" "log" "os" @@ -13,6 +12,7 @@ import ( "runtime" "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/packer/tmp" "github.com/klauspost/pgzip" ) diff --git a/template/interpolate/i.go b/template/interpolate/i.go index 54d1e516e..852f7f116 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -2,10 +2,11 @@ package interpolate import ( "bytes" - "github.com/google/uuid" "regexp" "strings" "text/template" + + "github.com/google/uuid" ) // Context is the context that an interpolation is done in. This defines From 7f0de5fc8dc1bf7fd1f3b4c196378883f2e1e33f Mon Sep 17 00:00:00 2001 From: jhawk28 Date: Fri, 14 Feb 2020 11:51:57 -0500 Subject: [PATCH 53/61] add ability to define multiple NICs for vsphere-iso (#8739) --- builder/vsphere/driver/vm.go | 72 ++++++++++++------- builder/vsphere/iso/config.hcl2spec.go | 2 + builder/vsphere/iso/step_create.go | 35 ++++++++- builder/vsphere/iso/step_create.hcl2spec.go | 51 ++++++++++--- .../docs/builders/vsphere-iso.html.md.erb | 7 ++ .../iso/_CreateConfig-not-required.html.md | 2 + .../vsphere/iso/_NIC-not-required.html.md | 8 +++ .../builder/vsphere/iso/_NIC-required.html.md | 4 ++ 8 files changed, 141 insertions(+), 40 deletions(-) create mode 100644 website/source/partials/builder/vsphere/iso/_NIC-not-required.html.md create mode 100644 website/source/partials/builder/vsphere/iso/_NIC-required.html.md diff --git a/builder/vsphere/driver/vm.go b/builder/vsphere/driver/vm.go index 06edbdfbd..aa1bae2fc 100644 --- a/builder/vsphere/driver/vm.go +++ b/builder/vsphere/driver/vm.go @@ -44,6 +44,13 @@ type HardwareConfig struct { VideoRAM int64 } +type NIC struct { + Network string // "" for default network + NetworkCard string // example: vmxnet3 + MacAddress string // set mac if want specific address + Passthrough *bool // direct path i/o +} + type CreateConfig struct { DiskThinProvisioned bool DiskControllerType string // example: "scsi", "pvscsi" @@ -57,8 +64,7 @@ type CreateConfig struct { ResourcePool string Datastore string GuestOS string // example: otherGuest - Network string // "" for default network - NetworkCard string // example: vmxnet3 + NICs []NIC USBController bool Version uint // example: 10 Firmware string // efi or bios @@ -509,42 +515,56 @@ func addDisk(_ *Driver, devices object.VirtualDeviceList, config *CreateConfig) } func addNetwork(d *Driver, devices object.VirtualDeviceList, config *CreateConfig) (object.VirtualDeviceList, error) { + if len(config.NICs) == 0 { + return nil, errors.New("no network adapters have been defined") + } + var network object.NetworkReference - if config.Network == "" { - h, err := d.FindHost(config.Host) + for _, nic := range config.NICs { + if nic.Network == "" { + h, err := d.FindHost(config.Host) + if err != nil { + return nil, err + } + + i, err := h.Info("network") + if err != nil { + return nil, err + } + + if len(i.Network) > 1 { + return nil, fmt.Errorf("Host has multiple networks. 
Specify it explicitly") + } + + network = object.NewNetwork(d.client.Client, i.Network[0]) + } else { + var err error + network, err = d.finder.Network(d.ctx, nic.Network) + if err != nil { + return nil, err + } + } + + backing, err := network.EthernetCardBackingInfo(d.ctx) if err != nil { return nil, err } - i, err := h.Info("network") + device, err := object.EthernetCardTypes().CreateEthernetCard(nic.NetworkCard, backing) if err != nil { return nil, err } - if len(i.Network) > 1 { - return nil, fmt.Errorf("Host has multiple networks. Specify it explicitly") + card := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + if nic.MacAddress != "" { + card.AddressType = string(types.VirtualEthernetCardMacTypeManual) + card.MacAddress = nic.MacAddress } + card.UptCompatibilityEnabled = nic.Passthrough - network = object.NewNetwork(d.client.Client, i.Network[0]) - } else { - var err error - network, err = d.finder.Network(d.ctx, config.Network) - if err != nil { - return nil, err - } + devices = append(devices, device) } - - backing, err := network.EthernetCardBackingInfo(d.ctx) - if err != nil { - return nil, err - } - - device, err := object.EthernetCardTypes().CreateEthernetCard(config.NetworkCard, backing) - if err != nil { - return nil, err - } - - return append(devices, device), nil + return devices, nil } func (vm *VirtualMachine) AddCdrom(controllerType string, isoPath string) error { diff --git a/builder/vsphere/iso/config.hcl2spec.go b/builder/vsphere/iso/config.hcl2spec.go index 222791485..a1c489147 100644 --- a/builder/vsphere/iso/config.hcl2spec.go +++ b/builder/vsphere/iso/config.hcl2spec.go @@ -32,6 +32,7 @@ type FlatConfig struct { DiskThinProvisioned *bool `mapstructure:"disk_thin_provisioned" cty:"disk_thin_provisioned"` Network *string `mapstructure:"network" cty:"network"` NetworkCard *string `mapstructure:"network_card" cty:"network_card"` + NICs []FlatNIC `mapstructure:"network_adapters" cty:"network_adapters"` USBController *bool `mapstructure:"usb_controller" cty:"usb_controller"` Notes *string `mapstructure:"notes" cty:"notes"` VMName *string `mapstructure:"vm_name" cty:"vm_name"` @@ -152,6 +153,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { "disk_thin_provisioned": &hcldec.AttrSpec{Name: "disk_thin_provisioned", Type: cty.Bool, Required: false}, "network": &hcldec.AttrSpec{Name: "network", Type: cty.String, Required: false}, "network_card": &hcldec.AttrSpec{Name: "network_card", Type: cty.String, Required: false}, + "network_adapters": &hcldec.BlockListSpec{TypeName: "network_adapters", Nested: hcldec.ObjectSpec((*FlatNIC)(nil).HCL2Spec())}, "usb_controller": &hcldec.AttrSpec{Name: "usb_controller", Type: cty.Bool, Required: false}, "notes": &hcldec.AttrSpec{Name: "notes", Type: cty.String, Required: false}, "vm_name": &hcldec.AttrSpec{Name: "vm_name", Type: cty.String, Required: false}, diff --git a/builder/vsphere/iso/step_create.go b/builder/vsphere/iso/step_create.go index b6e80aacf..a78c5d34e 100644 --- a/builder/vsphere/iso/step_create.go +++ b/builder/vsphere/iso/step_create.go @@ -1,5 +1,5 @@ //go:generate struct-markdown -//go:generate mapstructure-to-hcl2 -type CreateConfig +//go:generate mapstructure-to-hcl2 -type NIC,CreateConfig package iso @@ -13,6 +13,17 @@ import ( "github.com/hashicorp/packer/packer" ) +type NIC struct { + // Set network VM will be connected to. + Network string `mapstructure:"network"` + // Set VM network card type. Example `vmxnet3`. 
+ NetworkCard string `mapstructure:"network_card" required:"true"` + // Set network card MAC address + MacAddress string `mapstructure:"mac_address"` + // Enable DirectPath I/O passthrough + Passthrough *bool `mapstructure:"passthrough"` +} + type CreateConfig struct { // Set VM hardware version. Defaults to the most current VM hardware // version supported by vCenter. See @@ -35,6 +46,8 @@ type CreateConfig struct { Network string `mapstructure:"network"` // Set VM network card type. Example `vmxnet3`. NetworkCard string `mapstructure:"network_card"` + // Network adapters + NICs []NIC `mapstructure:"network_adapters"` // Create USB controller for virtual machine. Defaults to `false`. USBController bool `mapstructure:"usb_controller"` // VM notes. @@ -83,6 +96,23 @@ func (s *StepCreateVM) Run(_ context.Context, state multistep.StateBag) multiste } ui.Say("Creating VM...") + + // add network/network card an the first nic for backwards compatibility in the type is defined + var networkCards []driver.NIC + if s.Config.NetworkCard != "" { + networkCards = append(networkCards, driver.NIC{ + NetworkCard: s.Config.NetworkCard, + Network: s.Config.Network}) + } + for _, nic := range s.Config.NICs { + networkCards = append(networkCards, driver.NIC{ + Network: nic.Network, + NetworkCard: nic.NetworkCard, + MacAddress: nic.MacAddress, + Passthrough: nic.Passthrough, + }) + } + vm, err = d.CreateVM(&driver.CreateConfig{ DiskThinProvisioned: s.Config.DiskThinProvisioned, DiskControllerType: s.Config.DiskControllerType, @@ -94,8 +124,7 @@ func (s *StepCreateVM) Run(_ context.Context, state multistep.StateBag) multiste ResourcePool: s.Location.ResourcePool, Datastore: s.Location.Datastore, GuestOS: s.Config.GuestOSType, - Network: s.Config.Network, - NetworkCard: s.Config.NetworkCard, + NICs: networkCards, USBController: s.Config.USBController, Version: s.Config.Version, Firmware: s.Config.Firmware, diff --git a/builder/vsphere/iso/step_create.hcl2spec.go b/builder/vsphere/iso/step_create.hcl2spec.go index a9f4342f9..e3eb36697 100644 --- a/builder/vsphere/iso/step_create.hcl2spec.go +++ b/builder/vsphere/iso/step_create.hcl2spec.go @@ -1,4 +1,4 @@ -// Code generated by "mapstructure-to-hcl2 -type CreateConfig"; DO NOT EDIT. +// Code generated by "mapstructure-to-hcl2 -type NIC,CreateConfig"; DO NOT EDIT. package iso import ( @@ -9,16 +9,17 @@ import ( // FlatCreateConfig is an auto-generated flat version of CreateConfig. // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
type FlatCreateConfig struct { - Version *uint `mapstructure:"vm_version" cty:"vm_version"` - GuestOSType *string `mapstructure:"guest_os_type" cty:"guest_os_type"` - Firmware *string `mapstructure:"firmware" cty:"firmware"` - DiskControllerType *string `mapstructure:"disk_controller_type" cty:"disk_controller_type"` - DiskSize *int64 `mapstructure:"disk_size" cty:"disk_size"` - DiskThinProvisioned *bool `mapstructure:"disk_thin_provisioned" cty:"disk_thin_provisioned"` - Network *string `mapstructure:"network" cty:"network"` - NetworkCard *string `mapstructure:"network_card" cty:"network_card"` - USBController *bool `mapstructure:"usb_controller" cty:"usb_controller"` - Notes *string `mapstructure:"notes" cty:"notes"` + Version *uint `mapstructure:"vm_version" cty:"vm_version"` + GuestOSType *string `mapstructure:"guest_os_type" cty:"guest_os_type"` + Firmware *string `mapstructure:"firmware" cty:"firmware"` + DiskControllerType *string `mapstructure:"disk_controller_type" cty:"disk_controller_type"` + DiskSize *int64 `mapstructure:"disk_size" cty:"disk_size"` + DiskThinProvisioned *bool `mapstructure:"disk_thin_provisioned" cty:"disk_thin_provisioned"` + Network *string `mapstructure:"network" cty:"network"` + NetworkCard *string `mapstructure:"network_card" cty:"network_card"` + NICs []FlatNIC `mapstructure:"network_adapters" cty:"network_adapters"` + USBController *bool `mapstructure:"usb_controller" cty:"usb_controller"` + Notes *string `mapstructure:"notes" cty:"notes"` } // FlatMapstructure returns a new FlatCreateConfig. @@ -41,8 +42,36 @@ func (*FlatCreateConfig) HCL2Spec() map[string]hcldec.Spec { "disk_thin_provisioned": &hcldec.AttrSpec{Name: "disk_thin_provisioned", Type: cty.Bool, Required: false}, "network": &hcldec.AttrSpec{Name: "network", Type: cty.String, Required: false}, "network_card": &hcldec.AttrSpec{Name: "network_card", Type: cty.String, Required: false}, + "network_adapters": &hcldec.BlockListSpec{TypeName: "network_adapters", Nested: hcldec.ObjectSpec((*FlatNIC)(nil).HCL2Spec())}, "usb_controller": &hcldec.AttrSpec{Name: "usb_controller", Type: cty.Bool, Required: false}, "notes": &hcldec.AttrSpec{Name: "notes", Type: cty.String, Required: false}, } return s } + +// FlatNIC is an auto-generated flat version of NIC. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatNIC struct { + Network *string `mapstructure:"network" cty:"network"` + NetworkCard *string `mapstructure:"network_card" required:"true" cty:"network_card"` + MacAddress *string `mapstructure:"mac_address" cty:"mac_address"` + Passthrough *bool `mapstructure:"passthrough" cty:"passthrough"` +} + +// FlatMapstructure returns a new FlatNIC. +// FlatNIC is an auto-generated flat version of NIC. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*NIC) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { return new(FlatNIC) } + +// HCL2Spec returns the hcl spec of a NIC. +// This spec is used by HCL to read the fields of NIC. +// The decoded values from this spec will then be applied to a FlatNIC. 
+func (*FlatNIC) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "network": &hcldec.AttrSpec{Name: "network", Type: cty.String, Required: false}, + "network_card": &hcldec.AttrSpec{Name: "network_card", Type: cty.String, Required: false}, + "mac_address": &hcldec.AttrSpec{Name: "mac_address", Type: cty.String, Required: false}, + "passthrough": &hcldec.AttrSpec{Name: "passthrough", Type: cty.Bool, Required: false}, + } + return s +} diff --git a/website/source/docs/builders/vsphere-iso.html.md.erb b/website/source/docs/builders/vsphere-iso.html.md.erb index bf05eb64a..357268686 100644 --- a/website/source/docs/builders/vsphere-iso.html.md.erb +++ b/website/source/docs/builders/vsphere-iso.html.md.erb @@ -67,6 +67,9 @@ necessary for this build to succeed and can be found further down the page. ### Create Configuration <%= partial "partials/builder/vsphere/iso/CreateConfig-not-required" %> +### Network Adapter Configuration +<%= partial "partials/builder/vsphere/iso/NIC-required" %> + ### Floppy Configuration <%= partial "partials/builder/vsphere/iso/FloppyConfig-not-required" %> @@ -79,6 +82,10 @@ necessary for this build to succeed and can be found further down the page. <%= partial "partials/helper/communicator/Config-not-required" %> +#### Optional Network Adapter fields: + +<%= partial "partials/builder/vsphere/iso/NIC-not-required" %> + #### Optional SSH fields: <%= partial "partials/helper/communicator/SSH-not-required" %> diff --git a/website/source/partials/builder/vsphere/iso/_CreateConfig-not-required.html.md b/website/source/partials/builder/vsphere/iso/_CreateConfig-not-required.html.md index 09e632ee9..c27601606 100644 --- a/website/source/partials/builder/vsphere/iso/_CreateConfig-not-required.html.md +++ b/website/source/partials/builder/vsphere/iso/_CreateConfig-not-required.html.md @@ -21,6 +21,8 @@ - `network_card` (string) - Set VM network card type. Example `vmxnet3`. +- `network_adapters` ([]NIC) - Network adapters + - `usb_controller` (bool) - Create USB controller for virtual machine. Defaults to `false`. - `notes` (string) - VM notes. diff --git a/website/source/partials/builder/vsphere/iso/_NIC-not-required.html.md b/website/source/partials/builder/vsphere/iso/_NIC-not-required.html.md new file mode 100644 index 000000000..32b56d663 --- /dev/null +++ b/website/source/partials/builder/vsphere/iso/_NIC-not-required.html.md @@ -0,0 +1,8 @@ + + +- `network` (string) - Set network VM will be connected to. + +- `mac_address` (string) - Set network card MAC address + +- `passthrough` (\*bool) - Enable DirectPath I/O passthrough + \ No newline at end of file diff --git a/website/source/partials/builder/vsphere/iso/_NIC-required.html.md b/website/source/partials/builder/vsphere/iso/_NIC-required.html.md new file mode 100644 index 000000000..81b78c372 --- /dev/null +++ b/website/source/partials/builder/vsphere/iso/_NIC-required.html.md @@ -0,0 +1,4 @@ + + +- `network_card` (string) - Set VM network card type. Example `vmxnet3`. 
+ \ No newline at end of file From f65198726d7a19655bf0bcc8b327a5260fee39c7 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 09:10:53 -0800 Subject: [PATCH 54/61] update changelog --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfd23a731..ba4ee7c14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.5.3 (February 14, 2020) + +### IMPROVEMENTS: +* builder/vsphere: Add ability to define multiple NICs for vsphere-iso + [GH-8739] +* builder/vsphere: Add option to remove CD-ROM drives. [GH-8690] +* core: Add validation to catch when users accidentally add duplicate fields to + template [GH-8725] + +### Bug Fixes: +* core/hcl2: Fix template prepare/validation for HCL2 templates [GH-8742] +* core: Fix `build` template function interpolation [GH-8727] + ## 1.5.2 (February 12, 2020) **New Builder** The vsphere-iso builder, previously maintained by JetBrains, has been merged with the Packer core. It will be officially supported by the From c229607ee3a372825b12c7a122f50d11f001a7a8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 10:30:18 -0800 Subject: [PATCH 55/61] update to v1.5.3 --- version/version.go | 2 +- website/config.rb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index 9d7cc7cac..4cdfe815d 100644 --- a/version/version.go +++ b/version/version.go @@ -14,7 +14,7 @@ const Version = "1.5.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" func FormattedVersion() string { var versionString bytes.Buffer diff --git a/website/config.rb b/website/config.rb index 6e5ad4ef8..6d65b51ff 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| h.name = "packer" - h.version = "1.5.2" + h.version = "1.5.3" h.github_slug = "hashicorp/packer" h.website_root = "website" end From cebdabe0f71afe051b192398787b936aadf98c7e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 10:38:20 -0800 Subject: [PATCH 56/61] Cut version 1.5.3 From 909b9fcfc7fadbb3c52ffd0651511f383745f5ff Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 11:16:07 -0800 Subject: [PATCH 57/61] update to 1.5.4 dev on master branch --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index 4cdfe815d..45f4ee319 100644 --- a/version/version.go +++ b/version/version.go @@ -9,12 +9,12 @@ import ( var GitCommit string // The main version number that is being run at the moment. -const Version = "1.5.3" +const Version = "1.5.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "" +const VersionPrerelease = "dev" func FormattedVersion() string { var versionString bytes.Buffer From 9c171c1f135e077ab2dfcb73626b05c615e06382 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Fri, 14 Feb 2020 17:38:56 -0500 Subject: [PATCH 58/61] scripts: Update code signing scripts (#8746) * scripts/codesign_example: Fix reference to SHASUM_PROG variable * scripts/sign: Add check for required Artifactory token --- scripts/codesign_example.sh | 2 +- scripts/sign.sh | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/scripts/codesign_example.sh b/scripts/codesign_example.sh index 14d7ed6ef..b7a93badc 100755 --- a/scripts/codesign_example.sh +++ b/scripts/codesign_example.sh @@ -156,6 +156,6 @@ signed_checksum=$( | grep -i "x-checksum-sha256" | awk 'gsub("[\r\n]", "", $2) {print $2;}' ) -echo "${signed_checksum} signed_${SN_ID}.zip" | SHASUM_PROG -c +echo "${signed_checksum} signed_${SN_ID}.zip" | $SHASUM_PROG -c mv "signed_${SN_ID}.zip" "$TARGET_ZIP" diff --git a/scripts/sign.sh b/scripts/sign.sh index 850aa5d0e..e74ad70e3 100755 --- a/scripts/sign.sh +++ b/scripts/sign.sh @@ -8,6 +8,13 @@ # export PRODUCT_NAME="packer" # export ARTIFACTORY_TOKEN=$ARTIFACTORY_TOKEN +ARTIFACTORY_TOKEN="${ARTIFACTORY_TOKEN:-""}" + +if [ -z "$ARTIFACTORY_TOKEN" ]; then + echo "Missing required Artifactory credentials" + exit 1 +fi + # Get the parent directory of where this script is. SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done @@ -28,4 +35,4 @@ for DARWIN_BIN in $(find ./pkg/dist/*darwin_*.zip); do ./scripts/codesign_example.sh done -exit 0 \ No newline at end of file +exit 0 From ac239d118847d6bb24c2e925a2434c164f591a1b Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 14:50:21 -0800 Subject: [PATCH 59/61] fix checking for codesign --- scripts/codesign_example.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/codesign_example.sh b/scripts/codesign_example.sh index b7a93badc..24badd2be 100755 --- a/scripts/codesign_example.sh +++ b/scripts/codesign_example.sh @@ -157,5 +157,9 @@ signed_checksum=$( ) echo "${signed_checksum} signed_${SN_ID}.zip" | $SHASUM_PROG -c +if [ $? -ne 0 ] +then + exit 1 +fi mv "signed_${SN_ID}.zip" "$TARGET_ZIP" From c9b011145d21f61d3d68aae714a8e9974cf5986a Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 14:55:03 -0800 Subject: [PATCH 60/61] cut version 1.5.4 --- CHANGELOG.md | 4 ++++ version/version.go | 2 +- website/config.rb | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ba4ee7c14..1cb53470d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +## 1.5.4 (February 14, 2020) +no-change release to fix code-signing on OSX binaries. Since checksums for these +binaries has changed, we are releasing a second time to prevent confusion. + ## 1.5.3 (February 14, 2020) ### IMPROVEMENTS: diff --git a/version/version.go b/version/version.go index 45f4ee319..c223e6f49 100644 --- a/version/version.go +++ b/version/version.go @@ -14,7 +14,7 @@ const Version = "1.5.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "dev" +const VersionPrerelease = "" func FormattedVersion() string { var versionString bytes.Buffer diff --git a/website/config.rb b/website/config.rb index 6d65b51ff..54129118c 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| h.name = "packer" - h.version = "1.5.3" + h.version = "1.5.4" h.github_slug = "hashicorp/packer" h.website_root = "website" end From 9a85fdd0a59d8f60b6db4a84dc32b3d195f642ce Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 14 Feb 2020 15:27:21 -0800 Subject: [PATCH 61/61] Cut version 1.5.4 --- scripts/codesign_example.sh | 3 +-- scripts/dist.sh | 3 +++ scripts/sign.sh | 3 +++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/codesign_example.sh b/scripts/codesign_example.sh index 24badd2be..7b1659c1e 100755 --- a/scripts/codesign_example.sh +++ b/scripts/codesign_example.sh @@ -157,8 +157,7 @@ signed_checksum=$( ) echo "${signed_checksum} signed_${SN_ID}.zip" | $SHASUM_PROG -c -if [ $? -ne 0 ] -then +if [ $? -ne 0 ]; then exit 1 fi diff --git a/scripts/dist.sh b/scripts/dist.sh index 7c3a25d0d..1cd42e7fb 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -40,6 +40,9 @@ for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do done ./scripts/sign.sh +if [ $? -ne 0 ]; then + exit 1 +fi if [ -z $NOSIGN ]; then echo "==> Signing..." diff --git a/scripts/sign.sh b/scripts/sign.sh index e74ad70e3..024e5e5bd 100755 --- a/scripts/sign.sh +++ b/scripts/sign.sh @@ -33,6 +33,9 @@ for DARWIN_BIN in $(find ./pkg/dist/*darwin_*.zip); do echo $TARGET_ZIP ./scripts/codesign_example.sh + if [ $? -ne 0 ]; then + exit 1 + fi done exit 0
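
---

The `network_adapters` option introduced in PATCH 53 is only described field-by-field in the generated doc partials above, so here is a minimal sketch of how a caller of the vsphere driver might populate the resulting `driver.CreateConfig` with more than one NIC. This is illustrative only: the network names, card types, and MAC address are placeholder values, not defaults taken from the builder, and the snippet assumes the `driver.NIC` and `driver.CreateConfig` definitions exactly as added in the patch.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/packer/builder/vsphere/driver"
)

func main() {
	passthrough := true

	// Two adapters: a plain vmxnet3 card, and a second card with a manually
	// assigned MAC address and DirectPath I/O enabled. The network names and
	// MAC address below are placeholders.
	nics := []driver.NIC{
		{
			Network:     "VM Network",
			NetworkCard: "vmxnet3",
		},
		{
			Network:     "Backup Network",
			NetworkCard: "e1000",
			MacAddress:  "00:50:56:00:00:01",
			Passthrough: &passthrough,
		},
	}

	config := &driver.CreateConfig{
		GuestOS: "otherGuest",
		NICs:    nics,
	}
	fmt.Printf("%+v\n", config)
}
```

Note that `Passthrough` is declared as a `*bool`, so an unset value stays distinguishable from an explicit `false` when it is copied onto the ethernet card's `UptCompatibilityEnabled` field in `addNetwork`.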
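PATCH 53 also keeps the older singular `network`/`network_card` options working by folding them into the adapter list ahead of any `network_adapters` entries. The helper below is a simplified restatement of that ordering, not code from the patch: `mergeNICs` is a hypothetical name, and it assumes (as in `StepCreateVM`) that a non-empty `network_card` is what triggers the legacy path.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/packer/builder/vsphere/driver"
)

// mergeNICs restates the compatibility handling in StepCreateVM: when the
// legacy singular network/network_card options are set, they become the first
// adapter, and any network_adapters entries follow in declaration order.
func mergeNICs(legacyNetwork, legacyCard string, adapters []driver.NIC) []driver.NIC {
	var nics []driver.NIC
	if legacyCard != "" {
		nics = append(nics, driver.NIC{Network: legacyNetwork, NetworkCard: legacyCard})
	}
	return append(nics, adapters...)
}

func main() {
	merged := mergeNICs("VM Network", "vmxnet3", []driver.NIC{
		{Network: "Backup Network", NetworkCard: "e1000"},
	})
	fmt.Println(len(merged)) // 2: the legacy pair first, then the explicit adapter
}
```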