From 8f3313d81e9712007db1d3202b8116522a7b5148 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 27 Mar 2019 12:09:08 -0700 Subject: [PATCH 01/47] Create new docker communicator for windows containers --- builder/docker/builder.go | 3 +- builder/docker/config.go | 34 +- builder/docker/driver_docker.go | 12 +- .../docker/windows_container_communicator.go | 441 ++++++++++++++++++ helper/communicator/config.go | 2 +- 5 files changed, 469 insertions(+), 23 deletions(-) create mode 100644 builder/docker/windows_container_communicator.go diff --git a/builder/docker/builder.go b/builder/docker/builder.go index f6a6e4765..8d3382c2e 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -50,7 +50,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { Host: commHost, SSHConfig: b.config.Comm.SSHConfigFunc(), CustomConnect: map[string]multistep.Step{ - "docker": &StepConnectDocker{}, + "docker": &StepConnectDocker{}, + "dockerWindowsContainer": &StepConnectDocker{}, }, }, &common.StepProvision{}, diff --git a/builder/docker/config.go b/builder/docker/config.go index ef2d52a63..d7b2cd50c 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -23,21 +23,22 @@ type Config struct { common.PackerConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` - Author string - Changes []string - Commit bool - ContainerDir string `mapstructure:"container_dir"` - Discard bool - ExecUser string `mapstructure:"exec_user"` - ExportPath string `mapstructure:"export_path"` - Image string - Message string - Privileged bool `mapstructure:"privileged"` - Pty bool - Pull bool - RunCommand []string `mapstructure:"run_command"` - Volumes map[string]string - FixUploadOwner bool `mapstructure:"fix_upload_owner"` + Author string + Changes []string + Commit bool + ContainerDir string `mapstructure:"container_dir"` + Discard bool + ExecUser string `mapstructure:"exec_user"` + ExportPath string 
`mapstructure:"export_path"` + Image string + Message string + Privileged bool `mapstructure:"privileged"` + Pty bool + Pull bool + RunCommand []string `mapstructure:"run_command"` + Volumes map[string]string + FixUploadOwner bool `mapstructure:"fix_upload_owner"` + WindowsContainer bool `windows_container` // This is used to login to dockerhub to pull a private base container. For // pushing to dockerhub, see the docker post-processors @@ -92,6 +93,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { // Default to the normal Docker type if c.Comm.Type == "" { c.Comm.Type = "docker" + if c.WindowsContainer { + c.Comm.Type = "dockerWindowsContainer" + } } var errs *packer.MultiError diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index 918dfdb2c..9b0a4556b 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -8,7 +8,7 @@ import ( "os" "os/exec" "regexp" - "runtime" + // "runtime" "strings" "sync" @@ -270,11 +270,11 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { args = append(args, "--privileged") } for host, guest := range config.Volumes { - if runtime.GOOS == "windows" { - // docker-toolbox can't handle the normal C:\filepath format in CLI - host = strings.Replace(host, "\\", "/", -1) - host = strings.Replace(host, "C:/", "/c/", 1) - } + // if runtime.GOOS == "windows" { + // // docker-toolbox can't handle the normal C:\filepath format in CLI + // host = strings.Replace(host, "\\", "/", -1) + // host = strings.Replace(host, "C:/", "/c/", 1) + // } args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest)) } for _, v := range config.RunCommand { diff --git a/builder/docker/windows_container_communicator.go b/builder/docker/windows_container_communicator.go new file mode 100644 index 000000000..88054429c --- /dev/null +++ b/builder/docker/windows_container_communicator.go @@ -0,0 +1,441 @@ +package docker + +import ( + "archive/tar" + "fmt" + "io" + 
"io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/packer/packer" +) + +type WindowsContainerCommunicator struct { + ContainerID string + HostDir string + ContainerDir string + Version *version.Version + Config *Config + ContainerUser string + lock sync.Mutex +} + +func (c *WindowsContainerCommunicator) Start(remote *packer.RemoteCmd) error { + dockerArgs := []string{ + "exec", + "-i", + c.ContainerID, + "powershell", + fmt.Sprintf("(%s)", remote.Command), + } + + if c.Config.Pty { + dockerArgs = append(dockerArgs[:2], append([]string{"-t"}, dockerArgs[2:]...)...) + } + + if c.Config.ExecUser != "" { + dockerArgs = append(dockerArgs[:2], + append([]string{"-u", c.Config.ExecUser}, dockerArgs[2:]...)...) + } + + cmd := exec.Command("docker", dockerArgs...) + + var ( + stdin_w io.WriteCloser + err error + ) + + stdin_w, err = cmd.StdinPipe() + if err != nil { + return err + } + + stderr_r, err := cmd.StderrPipe() + if err != nil { + return err + } + + stdout_r, err := cmd.StdoutPipe() + if err != nil { + return err + } + + // Run the actual command in a goroutine so that Start doesn't block + go c.run(cmd, remote, stdin_w, stdout_r, stderr_r) + + return nil +} + +// Upload uses docker exec to copy the file from the host to the container +func (c *WindowsContainerCommunicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error { + // Create a temporary file to store the upload + tempfile, err := ioutil.TempFile(c.HostDir, "upload") + if err != nil { + return err + } + defer os.Remove(tempfile.Name()) + + // Copy the contents to the temporary file + _, err = io.Copy(tempfile, src) + if err != nil { + return err + } + + if fi != nil { + tempfile.Chmod((*fi).Mode()) + } + tempfile.Close() + + // Copy the file into place by copying the temporary file we put + // into the shared folder into the proper location in the container + cmd := &packer.RemoteCmd{ + Command: 
fmt.Sprintf("Copy-Item -Path %s/%s -Destination %s", c.ContainerDir, + filepath.Base(tempfile.Name()), dst), + } + + if err := c.Start(cmd); err != nil { + return err + } + + // Wait for the copy to complete + cmd.Wait() + if cmd.ExitStatus != 0 { + return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus) + } + + return nil +} + +func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude []string) error { + // Create the temporary directory that will store the contents of "src" + // for copying into the container. + td, err := ioutil.TempDir(c.HostDir, "dirupload") + if err != nil { + return err + } + defer os.RemoveAll(td) + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relpath, err := filepath.Rel(src, path) + if err != nil { + return err + } + hostpath := filepath.Join(td, relpath) + + // If it is a directory, just create it + if info.IsDir() { + return os.MkdirAll(hostpath, info.Mode()) + } + + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + dest, err := os.Readlink(path) + + if err != nil { + return err + } + + return os.Symlink(dest, hostpath) + } + + // It is a file, copy it over, including mode. 
+ src, err := os.Open(path) + if err != nil { + return err + } + defer src.Close() + + dst, err := os.Create(hostpath) + if err != nil { + return err + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + return err + } + + si, err := src.Stat() + if err != nil { + return err + } + + return dst.Chmod(si.Mode()) + } + + // Copy the entire directory tree to the temporary directory + if err := filepath.Walk(src, walkFn); err != nil { + return err + } + + // Determine the destination directory + containerSrc := filepath.Join(c.ContainerDir, filepath.Base(td)) + containerDst := dst + if src[len(src)-1] != '/' { + containerDst = filepath.Join(dst, filepath.Base(src)) + } + + // Make the directory, then copy into it + cmd := &packer.RemoteCmd{ + Command: fmt.Sprintf("set -e; mkdir -p %s; command cp -R %s/ %s", + containerDst, containerSrc, containerDst), + } + if err := c.Start(cmd); err != nil { + return err + } + + // Wait for the copy to complete + cmd.Wait() + if cmd.ExitStatus != 0 { + return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus) + } + + return nil +} + +func (c *WindowsContainerCommunicator) uploadFileOld(dst string, src io.Reader, fi *os.FileInfo) error { + // command format: docker cp /path/to/infile containerid:/path/to/outfile + log.Printf("Copying to %s on container %s.", dst, c.ContainerID) + + localCmd := exec.Command("docker", "cp", "-", + fmt.Sprintf("%s:%s", c.ContainerID, filepath.Dir(dst))) + + stderrP, err := localCmd.StderrPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + + stdin, err := localCmd.StdinPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + + if err := localCmd.Start(); err != nil { + return err + } + + archive := tar.NewWriter(stdin) + header, err := tar.FileInfoHeader(*fi, "") + if err != nil { + return err + } + header.Name = filepath.Base(dst) + archive.WriteHeader(header) + numBytes, err := io.Copy(archive, src) + if 
err != nil { + return fmt.Errorf("Failed to pipe upload: %s", err) + } + log.Printf("Copied %d bytes for %s", numBytes, dst) + + if err := archive.Close(); err != nil { + return fmt.Errorf("Failed to close archive: %s", err) + } + if err := stdin.Close(); err != nil { + return fmt.Errorf("Failed to close stdin: %s", err) + } + + stderrOut, err := ioutil.ReadAll(stderrP) + if err != nil { + return err + } + + if err := localCmd.Wait(); err != nil { + return fmt.Errorf("Failed to upload to '%s' in container: %s. %s.", dst, stderrOut, err) + } + + if err := c.fixDestinationOwner(dst); err != nil { + return err + } + + return nil +} + +func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude []string) error { + /* + from https://docs.docker.com/engine/reference/commandline/cp/#extended-description + SRC_PATH specifies a directory + DEST_PATH does not exist + DEST_PATH is created as a directory and the contents of the source directory are copied into this directory + DEST_PATH exists and is a file + Error condition: cannot copy a directory to a file + DEST_PATH exists and is a directory + SRC_PATH does not end with /. (that is: slash followed by dot) + the source directory is copied into this directory + SRC_PATH does end with /. (that is: slash followed by dot) + the content of the source directory is copied into this directory + + translating that in to our semantics: + + if source ends in / + docker cp src. 
dest + otherwise, cp source dest + + */ + + var dockerSource string + + if src[len(src)-1] == '/' { + dockerSource = fmt.Sprintf("%s.", src) + } else { + dockerSource = fmt.Sprintf("%s", src) + } + + // Make the directory, then copy into it + localCmd := exec.Command("docker", "cp", dockerSource, fmt.Sprintf("%s:%s", c.ContainerID, dst)) + + stderrP, err := localCmd.StderrPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + if err := localCmd.Start(); err != nil { + return fmt.Errorf("Failed to copy: %s", err) + } + stderrOut, err := ioutil.ReadAll(stderrP) + if err != nil { + return err + } + + // Wait for the copy to complete + if err := localCmd.Wait(); err != nil { + return fmt.Errorf("Failed to upload to '%s' in container: %s. %s.", dst, stderrOut, err) + } + + if err := c.fixDestinationOwner(dst); err != nil { + return err + } + + return nil +} + +// Download pulls a file out of a container using `docker cp`. We have a source +// path and want to write to an io.Writer, not a file. We use - to make docker +// cp to write to stdout, and then copy the stream to our destination io.Writer. +func (c *WindowsContainerCommunicator) Download(src string, dst io.Writer) error { + log.Printf("Downloading file from container: %s:%s", c.ContainerID, src) + localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerID, src), "-") + + pipe, err := localCmd.StdoutPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + + if err = localCmd.Start(); err != nil { + return fmt.Errorf("Failed to start download: %s", err) + } + + // When you use - to send docker cp to stdout it is streamed as a tar; this + // enables it to work with directories. We don't actually support + // directories in Download() but we still need to handle the tar format. 
+ archive := tar.NewReader(pipe) + _, err = archive.Next() + if err != nil { + return fmt.Errorf("Failed to read header from tar stream: %s", err) + } + + numBytes, err := io.Copy(dst, archive) + if err != nil { + return fmt.Errorf("Failed to pipe download: %s", err) + } + log.Printf("Copied %d bytes for %s", numBytes, src) + + if err = localCmd.Wait(); err != nil { + return fmt.Errorf("Failed to download '%s' from container: %s", src, err) + } + + return nil +} + +func (c *WindowsContainerCommunicator) DownloadDir(src string, dst string, exclude []string) error { + return fmt.Errorf("DownloadDir is not implemented for docker") +} + +// Runs the given command and blocks until completion +func (c *WindowsContainerCommunicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin io.WriteCloser, stdout, stderr io.ReadCloser) { + // For Docker, remote communication must be serialized since it + // only supports single execution. + c.lock.Lock() + defer c.lock.Unlock() + + wg := sync.WaitGroup{} + repeat := func(w io.Writer, r io.ReadCloser) { + io.Copy(w, r) + r.Close() + wg.Done() + } + + if remote.Stdout != nil { + wg.Add(1) + go repeat(remote.Stdout, stdout) + } + + if remote.Stderr != nil { + wg.Add(1) + go repeat(remote.Stderr, stderr) + } + + // Start the command + log.Printf("Executing %s:", strings.Join(cmd.Args, " ")) + if err := cmd.Start(); err != nil { + log.Printf("Error executing: %s", err) + remote.SetExited(254) + return + } + + var exitStatus int + + if remote.Stdin != nil { + go func() { + io.Copy(stdin, remote.Stdin) + // close stdin to support commands that wait for stdin to be closed before exiting. + stdin.Close() + }() + } + + wg.Wait() + err := cmd.Wait() + + if exitErr, ok := err.(*exec.ExitError); ok { + exitStatus = 1 + + // There is no process-independent way to get the REAL + // exit status so we just try to go deeper. 
+ if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + } + + // Set the exit status which triggers waiters + remote.SetExited(exitStatus) +} + +// TODO Workaround for #5307. Remove once #5409 is fixed. +func (c *WindowsContainerCommunicator) fixDestinationOwner(destination string) error { + if !c.Config.FixUploadOwner { + return nil + } + + owner := c.ContainerUser + if owner == "" { + owner = "root" + } + + chownArgs := []string{ + "docker", "exec", "--user", "root", c.ContainerID, "/bin/sh", "-c", + fmt.Sprintf("chown -R %s %s", owner, destination), + } + if output, err := exec.Command(chownArgs[0], chownArgs[1:]...).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to set owner of the uploaded file: %s, %s", err, output) + } + + return nil +} diff --git a/helper/communicator/config.go b/helper/communicator/config.go index fa02810d7..5488beccd 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -212,7 +212,7 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { if es := c.prepareWinRM(ctx); len(es) > 0 { errs = append(errs, es...) 
} - case "docker", "none": + case "docker", "dockerWindowsContainer", "none": break default: return []error{fmt.Errorf("Communicator type %s is invalid", c.Type)} From 3b87f2a5191d3fcfefb762521869384fc6a6f969 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 27 Mar 2019 14:51:50 -0700 Subject: [PATCH 02/47] stop container before committing if windows --- builder/docker/config.go | 2 +- builder/docker/driver.go | 5 +- builder/docker/driver_docker.go | 7 +++ builder/docker/step_commit.go | 10 ++++ builder/docker/step_connect_docker.go | 30 +++++++--- builder/docker/step_run.go | 2 +- .../docker/windows_container_communicator.go | 57 ------------------- 7 files changed, 44 insertions(+), 69 deletions(-) diff --git a/builder/docker/config.go b/builder/docker/config.go index d7b2cd50c..684e88bd9 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -38,7 +38,7 @@ type Config struct { RunCommand []string `mapstructure:"run_command"` Volumes map[string]string FixUploadOwner bool `mapstructure:"fix_upload_owner"` - WindowsContainer bool `windows_container` + WindowsContainer bool `mapstructure:"windows_container"` // This is used to login to dockerhub to pull a private base container. For // pushing to dockerhub, see the docker post-processors diff --git a/builder/docker/driver.go b/builder/docker/driver.go index 7359a667d..73bf5e1a8 100644 --- a/builder/docker/driver.go +++ b/builder/docker/driver.go @@ -46,7 +46,10 @@ type Driver interface { // along with a potential error. StartContainer(*ContainerConfig) (string, error) - // StopContainer forcibly stops a container. + // KillContainer forcibly stops a container. + KillContainer(id string) error + + // StopContainer gently stops a container. 
StopContainer(id string) error // TagImage tags the image with the given ID diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index 9b0a4556b..6b946e954 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -314,6 +314,13 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { } func (d *DockerDriver) StopContainer(id string) error { + if err := exec.Command("docker", "stop", id).Run(); err != nil { + return err + } + return nil +} + +func (d *DockerDriver) KillContainer(id string) error { if err := exec.Command("docker", "kill", id).Run(); err != nil { return err } diff --git a/builder/docker/step_commit.go b/builder/docker/step_commit.go index d248ee64c..c75ab649c 100644 --- a/builder/docker/step_commit.go +++ b/builder/docker/step_commit.go @@ -19,6 +19,16 @@ func (s *StepCommit) Run(_ context.Context, state multistep.StateBag) multistep. config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) + if config.WindowsContainer { + // docker can't commit a running Windows container + err := driver.StopContainer(containerId) + if err != nil { + state.Put("error", err) + ui.Error(fmt.Sprintf("Error halting windows container for commit: %s", + err.Error())) + return multistep.ActionHalt + } + } ui.Say("Committing the container") imageId, err := driver.Commit(containerId, config.Author, config.Changes, config.Message) if err != nil { diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index ef0222a91..3cd4bc173 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -32,16 +32,28 @@ func (s *StepConnectDocker) Run(_ context.Context, state multistep.StateBag) mul // Create the communicator that talks to Docker via various // os/exec tricks. 
- comm := &Communicator{ - ContainerID: containerId, - HostDir: tempDir, - ContainerDir: config.ContainerDir, - Version: version, - Config: config, - ContainerUser: containerUser, - } + if config.WindowsContainer { + comm := &WindowsContainerCommunicator{ + ContainerID: containerId, + HostDir: tempDir, + ContainerDir: config.ContainerDir, + Version: version, + Config: config, + ContainerUser: containerUser, + } + state.Put("communicator", comm) - state.Put("communicator", comm) + } else { + comm := &Communicator{ + ContainerID: containerId, + HostDir: tempDir, + ContainerDir: config.ContainerDir, + Version: version, + Config: config, + ContainerUser: containerUser, + } + state.Put("communicator", comm) + } return multistep.ActionContinue } diff --git a/builder/docker/step_run.go b/builder/docker/step_run.go index 1d2ba6862..25c5dee0c 100644 --- a/builder/docker/step_run.go +++ b/builder/docker/step_run.go @@ -58,7 +58,7 @@ func (s *StepRun) Cleanup(state multistep.StateBag) { // just mean that the container doesn't exist anymore, which isn't a // big deal. 
ui.Say(fmt.Sprintf("Killing the container: %s", s.containerId)) - driver.StopContainer(s.containerId) + driver.KillContainer(s.containerId) // Reset the container ID so that we're idempotent s.containerId = "" diff --git a/builder/docker/windows_container_communicator.go b/builder/docker/windows_container_communicator.go index 88054429c..cc93975a4 100644 --- a/builder/docker/windows_container_communicator.go +++ b/builder/docker/windows_container_communicator.go @@ -260,63 +260,6 @@ func (c *WindowsContainerCommunicator) uploadFileOld(dst string, src io.Reader, return nil } -func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude []string) error { - /* - from https://docs.docker.com/engine/reference/commandline/cp/#extended-description - SRC_PATH specifies a directory - DEST_PATH does not exist - DEST_PATH is created as a directory and the contents of the source directory are copied into this directory - DEST_PATH exists and is a file - Error condition: cannot copy a directory to a file - DEST_PATH exists and is a directory - SRC_PATH does not end with /. (that is: slash followed by dot) - the source directory is copied into this directory - SRC_PATH does end with /. (that is: slash followed by dot) - the content of the source directory is copied into this directory - - translating that in to our semantics: - - if source ends in / - docker cp src. 
dest - otherwise, cp source dest - - */ - - var dockerSource string - - if src[len(src)-1] == '/' { - dockerSource = fmt.Sprintf("%s.", src) - } else { - dockerSource = fmt.Sprintf("%s", src) - } - - // Make the directory, then copy into it - localCmd := exec.Command("docker", "cp", dockerSource, fmt.Sprintf("%s:%s", c.ContainerID, dst)) - - stderrP, err := localCmd.StderrPipe() - if err != nil { - return fmt.Errorf("Failed to open pipe: %s", err) - } - if err := localCmd.Start(); err != nil { - return fmt.Errorf("Failed to copy: %s", err) - } - stderrOut, err := ioutil.ReadAll(stderrP) - if err != nil { - return err - } - - // Wait for the copy to complete - if err := localCmd.Wait(); err != nil { - return fmt.Errorf("Failed to upload to '%s' in container: %s. %s.", dst, stderrOut, err) - } - - if err := c.fixDestinationOwner(dst); err != nil { - return err - } - - return nil -} - // Download pulls a file out of a container using `docker cp`. We have a source // path and want to write to an io.Writer, not a file. We use - to make docker // cp to write to stdout, and then copy the stream to our destination io.Writer. From 0e6c779a88da7615f75afdd5f4a1df3cb6595fd6 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 27 Mar 2019 15:22:59 -0700 Subject: [PATCH 03/47] allow user to access env_var_format in windows_shell call --- provisioner/windows-shell/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/windows-shell/provisioner.go b/provisioner/windows-shell/provisioner.go index 6d37e2514..993c92ab2 100644 --- a/provisioner/windows-shell/provisioner.go +++ b/provisioner/windows-shell/provisioner.go @@ -40,7 +40,7 @@ type Config struct { // This is used in the template generation to format environment variables // inside the `ExecuteCommand` template. 
- EnvVarFormat string + EnvVarFormat string `mapstructure:"env_var_format"` ctx interpolate.Context } From af01860fa9151c3e90bf6f0f2414f94c616d59f1 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 27 Mar 2019 15:29:22 -0700 Subject: [PATCH 04/47] remove old docker-toolbox limitation. --- builder/docker/driver_docker.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index 6b946e954..59b4c984d 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -8,7 +8,6 @@ import ( "os" "os/exec" "regexp" - // "runtime" "strings" "sync" @@ -270,11 +269,6 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { args = append(args, "--privileged") } for host, guest := range config.Volumes { - // if runtime.GOOS == "windows" { - // // docker-toolbox can't handle the normal C:\filepath format in CLI - // host = strings.Replace(host, "\\", "/", -1) - // host = strings.Replace(host, "C:/", "/c/", 1) - // } args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest)) } for _, v := range config.RunCommand { From 36f26343528fce0b96521663935b11c2abbd507f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 28 Mar 2019 09:38:17 -0700 Subject: [PATCH 05/47] can't use docker cp so call powershell to do this natively. Fix implementation for upload, uploadDir, and download in windows container communicator --- builder/docker/step_connect_docker.go | 3 +- .../docker/windows_container_communicator.go | 210 +++--------------- 2 files changed, 34 insertions(+), 179 deletions(-) diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index 3cd4bc173..6f08ac326 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -33,13 +33,14 @@ func (s *StepConnectDocker) Run(_ context.Context, state multistep.StateBag) mul // Create the communicator that talks to Docker via various // os/exec tricks. 
if config.WindowsContainer { - comm := &WindowsContainerCommunicator{ + comm := &WindowsContainerCommunicator{Communicator{ ContainerID: containerId, HostDir: tempDir, ContainerDir: config.ContainerDir, Version: version, Config: config, ContainerUser: containerUser, + }, } state.Put("communicator", comm) diff --git a/builder/docker/windows_container_communicator.go b/builder/docker/windows_container_communicator.go index cc93975a4..46650a103 100644 --- a/builder/docker/windows_container_communicator.go +++ b/builder/docker/windows_container_communicator.go @@ -1,7 +1,7 @@ package docker import ( - "archive/tar" + "bytes" "fmt" "io" "io/ioutil" @@ -9,22 +9,12 @@ import ( "os" "os/exec" "path/filepath" - "strings" - "sync" - "syscall" - "github.com/hashicorp/go-version" "github.com/hashicorp/packer/packer" ) type WindowsContainerCommunicator struct { - ContainerID string - HostDir string - ContainerDir string - Version *version.Version - Config *Config - ContainerUser string - lock sync.Mutex + Communicator } func (c *WindowsContainerCommunicator) Start(remote *packer.RemoteCmd) error { @@ -87,7 +77,6 @@ func (c *WindowsContainerCommunicator) Upload(dst string, src io.Reader, fi *os. 
if err != nil { return err } - if fi != nil { tempfile.Chmod((*fi).Mode()) } @@ -165,12 +154,7 @@ func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude return err } - si, err := src.Stat() - if err != nil { - return err - } - - return dst.Chmod(si.Mode()) + return nil } // Copy the entire directory tree to the temporary directory @@ -187,8 +171,8 @@ func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude // Make the directory, then copy into it cmd := &packer.RemoteCmd{ - Command: fmt.Sprintf("set -e; mkdir -p %s; command cp -R %s/ %s", - containerDst, containerSrc, containerDst), + Command: fmt.Sprintf("Copy-Item %s -Destination %s -Recurse", + containerSrc, containerDst), } if err := c.Start(cmd); err != nil { return err @@ -203,96 +187,40 @@ func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude return nil } -func (c *WindowsContainerCommunicator) uploadFileOld(dst string, src io.Reader, fi *os.FileInfo) error { - // command format: docker cp /path/to/infile containerid:/path/to/outfile - log.Printf("Copying to %s on container %s.", dst, c.ContainerID) - - localCmd := exec.Command("docker", "cp", "-", - fmt.Sprintf("%s:%s", c.ContainerID, filepath.Dir(dst))) - - stderrP, err := localCmd.StderrPipe() - if err != nil { - return fmt.Errorf("Failed to open pipe: %s", err) - } - - stdin, err := localCmd.StdinPipe() - if err != nil { - return fmt.Errorf("Failed to open pipe: %s", err) - } - - if err := localCmd.Start(); err != nil { - return err - } - - archive := tar.NewWriter(stdin) - header, err := tar.FileInfoHeader(*fi, "") - if err != nil { - return err - } - header.Name = filepath.Base(dst) - archive.WriteHeader(header) - numBytes, err := io.Copy(archive, src) - if err != nil { - return fmt.Errorf("Failed to pipe upload: %s", err) - } - log.Printf("Copied %d bytes for %s", numBytes, dst) - - if err := archive.Close(); err != nil { - return fmt.Errorf("Failed to close archive: %s", 
err) - } - if err := stdin.Close(); err != nil { - return fmt.Errorf("Failed to close stdin: %s", err) - } - - stderrOut, err := ioutil.ReadAll(stderrP) - if err != nil { - return err - } - - if err := localCmd.Wait(); err != nil { - return fmt.Errorf("Failed to upload to '%s' in container: %s. %s.", dst, stderrOut, err) - } - - if err := c.fixDestinationOwner(dst); err != nil { - return err - } - - return nil -} - // Download pulls a file out of a container using `docker cp`. We have a source -// path and want to write to an io.Writer, not a file. We use - to make docker -// cp to write to stdout, and then copy the stream to our destination io.Writer. +// path and want to write to an io.Writer func (c *WindowsContainerCommunicator) Download(src string, dst io.Writer) error { log.Printf("Downloading file from container: %s:%s", c.ContainerID, src) - localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerID, src), "-") + // Copy file onto temp file on mounted volume inside container + var stdout, stderr bytes.Buffer + cmd := &packer.RemoteCmd{ + Command: fmt.Sprintf("Copy-Item -Path %s -Destination %s/%s", src, c.ContainerDir, + filepath.Base(src)), + Stdout: &stdout, + Stderr: &stderr, + } + if err := c.Start(cmd); err != nil { + return err + } - pipe, err := localCmd.StdoutPipe() + // Wait for the copy to complete + cmd.Wait() + + if cmd.ExitStatus != 0 { + return fmt.Errorf("Failed to copy file to shared drive: %s, %s, %d", stderr.String(), stdout.String(), cmd.ExitStatus) + } + + // Read that copied file into a new file opened on host machine + fsrc, err := os.Open(filepath.Join(c.HostDir, filepath.Base(src))) if err != nil { - return fmt.Errorf("Failed to open pipe: %s", err) + return err } + defer fsrc.Close() + defer os.Remove(fsrc.Name()) - if err = localCmd.Start(); err != nil { - return fmt.Errorf("Failed to start download: %s", err) - } - - // When you use - to send docker cp to stdout it is streamed as a tar; this - // enables it to 
work with directories. We don't actually support - // directories in Download() but we still need to handle the tar format. - archive := tar.NewReader(pipe) - _, err = archive.Next() + _, err = io.Copy(dst, fsrc) if err != nil { - return fmt.Errorf("Failed to read header from tar stream: %s", err) - } - - numBytes, err := io.Copy(dst, archive) - if err != nil { - return fmt.Errorf("Failed to pipe download: %s", err) - } - log.Printf("Copied %d bytes for %s", numBytes, src) - - if err = localCmd.Wait(); err != nil { - return fmt.Errorf("Failed to download '%s' from container: %s", src, err) + return err } return nil @@ -306,79 +234,5 @@ func (c *WindowsContainerCommunicator) DownloadDir(src string, dst string, exclu func (c *WindowsContainerCommunicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin io.WriteCloser, stdout, stderr io.ReadCloser) { // For Docker, remote communication must be serialized since it // only supports single execution. - c.lock.Lock() - defer c.lock.Unlock() - - wg := sync.WaitGroup{} - repeat := func(w io.Writer, r io.ReadCloser) { - io.Copy(w, r) - r.Close() - wg.Done() - } - - if remote.Stdout != nil { - wg.Add(1) - go repeat(remote.Stdout, stdout) - } - - if remote.Stderr != nil { - wg.Add(1) - go repeat(remote.Stderr, stderr) - } - - // Start the command - log.Printf("Executing %s:", strings.Join(cmd.Args, " ")) - if err := cmd.Start(); err != nil { - log.Printf("Error executing: %s", err) - remote.SetExited(254) - return - } - - var exitStatus int - - if remote.Stdin != nil { - go func() { - io.Copy(stdin, remote.Stdin) - // close stdin to support commands that wait for stdin to be closed before exiting. - stdin.Close() - }() - } - - wg.Wait() - err := cmd.Wait() - - if exitErr, ok := err.(*exec.ExitError); ok { - exitStatus = 1 - - // There is no process-independent way to get the REAL - // exit status so we just try to go deeper. 
- if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - exitStatus = status.ExitStatus() - } - } - - // Set the exit status which triggers waiters - remote.SetExited(exitStatus) -} - -// TODO Workaround for #5307. Remove once #5409 is fixed. -func (c *WindowsContainerCommunicator) fixDestinationOwner(destination string) error { - if !c.Config.FixUploadOwner { - return nil - } - - owner := c.ContainerUser - if owner == "" { - owner = "root" - } - - chownArgs := []string{ - "docker", "exec", "--user", "root", c.ContainerID, "/bin/sh", "-c", - fmt.Sprintf("chown -R %s %s", owner, destination), - } - if output, err := exec.Command(chownArgs[0], chownArgs[1:]...).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to set owner of the uploaded file: %s, %s", err, output) - } - - return nil + c.Communicator.run(cmd, remote, stdin, stdout, stderr) } From a01091952d2d4c71b549caf3aea4aa8e44f27715 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 28 Mar 2019 16:26:38 -0700 Subject: [PATCH 06/47] add documentation for windows_container flag --- website/source/docs/builders/docker.html.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/builders/docker.html.md b/website/source/docs/builders/docker.html.md index d5ef7c632..c7384ae3a 100644 --- a/website/source/docs/builders/docker.html.md +++ b/website/source/docs/builders/docker.html.md @@ -217,6 +217,10 @@ You must specify (only) one of `commit`, `discard`, or `export_path`. mount into this container. The key of the object is the host path, the value is the container path. +- `windows_container` (bool) - If "true", tells Packer that you are building a + Windows container running on a windows host. This is necessary for building + Windows containers, because our normal docker bindings do not work for them. + - `container_dir` (string) - The directory inside container to mount temp directory from host server for work [file provisioner](/docs/provisioners/file.html). 
By default this is set to From 70150ffa0fcd55413f636286c95ce21c406f1ad2 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 28 Mar 2019 16:46:07 -0700 Subject: [PATCH 07/47] set powershell entrypoint for windows containers --- builder/docker/config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builder/docker/config.go b/builder/docker/config.go index 684e88bd9..8b41589e0 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -75,6 +75,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { // Defaults if len(c.RunCommand) == 0 { c.RunCommand = []string{"-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"} + if c.WindowsContainer { + c.RunCommand = []string{"-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"} + } } // Default Pull if it wasn't set From 0860edeed80e373bee7a5d27bee6acb2e9750ee6 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 28 Mar 2019 16:55:35 -0700 Subject: [PATCH 08/47] fix mocks --- builder/docker/driver_mock.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/builder/docker/driver_mock.go b/builder/docker/driver_mock.go index 4cca3325b..4ffc69299 100644 --- a/builder/docker/driver_mock.go +++ b/builder/docker/driver_mock.go @@ -28,6 +28,10 @@ type MockDriver struct { IPAddressResult string IPAddressErr error + KillCalled bool + KillID string + KillError error + LoginCalled bool LoginUsername string LoginPassword string @@ -160,6 +164,12 @@ func (d *MockDriver) StartContainer(config *ContainerConfig) (string, error) { return d.StartID, d.StartError } +func (d *MockDriver) KillContainer(id string) error { + d.KillCalled = true + d.KillID = id + return d.KillError +} + func (d *MockDriver) StopContainer(id string) error { d.StopCalled = true d.StopID = id From 12b9004c76e377a00317f093cd1555349b8efe2c Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 29 Mar 2019 11:14:01 -0700 Subject: [PATCH 09/47] reduce duplicated code --- builder/docker/communicator.go | 6 +- 
builder/docker/step_connect_docker.go | 2 + .../docker/windows_container_communicator.go | 58 ------------------- 3 files changed, 5 insertions(+), 61 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index e898ecd1c..48b3de7c6 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -25,6 +25,7 @@ type Communicator struct { Config *Config ContainerUser string lock sync.Mutex + EntryPoint []string } func (c *Communicator) Start(remote *packer.RemoteCmd) error { @@ -32,10 +33,9 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { "exec", "-i", c.ContainerID, - "/bin/sh", - "-c", - fmt.Sprintf("(%s)", remote.Command), } + dockerArgs = append(dockerArgs, c.EntryPoint...) + dockerArgs = append(dockerArgs, fmt.Sprintf("(%s)", remote.Command)) if c.Config.Pty { dockerArgs = append(dockerArgs[:2], append([]string{"-t"}, dockerArgs[2:]...)...) diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index 6f08ac326..583e69b36 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -40,6 +40,7 @@ func (s *StepConnectDocker) Run(_ context.Context, state multistep.StateBag) mul Version: version, Config: config, ContainerUser: containerUser, + EntryPoint: []string{"powershell"}, }, } state.Put("communicator", comm) @@ -52,6 +53,7 @@ func (s *StepConnectDocker) Run(_ context.Context, state multistep.StateBag) mul Version: version, Config: config, ContainerUser: containerUser, + EntryPoint: []string{"/bin/sh", "-c"}, } state.Put("communicator", comm) } diff --git a/builder/docker/windows_container_communicator.go b/builder/docker/windows_container_communicator.go index 46650a103..dbb74f406 100644 --- a/builder/docker/windows_container_communicator.go +++ b/builder/docker/windows_container_communicator.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "log" "os" - "os/exec" "path/filepath" "github.com/hashicorp/packer/packer" @@ -17,52 
+16,6 @@ type WindowsContainerCommunicator struct { Communicator } -func (c *WindowsContainerCommunicator) Start(remote *packer.RemoteCmd) error { - dockerArgs := []string{ - "exec", - "-i", - c.ContainerID, - "powershell", - fmt.Sprintf("(%s)", remote.Command), - } - - if c.Config.Pty { - dockerArgs = append(dockerArgs[:2], append([]string{"-t"}, dockerArgs[2:]...)...) - } - - if c.Config.ExecUser != "" { - dockerArgs = append(dockerArgs[:2], - append([]string{"-u", c.Config.ExecUser}, dockerArgs[2:]...)...) - } - - cmd := exec.Command("docker", dockerArgs...) - - var ( - stdin_w io.WriteCloser - err error - ) - - stdin_w, err = cmd.StdinPipe() - if err != nil { - return err - } - - stderr_r, err := cmd.StderrPipe() - if err != nil { - return err - } - - stdout_r, err := cmd.StdoutPipe() - if err != nil { - return err - } - - // Run the actual command in a goroutine so that Start doesn't block - go c.run(cmd, remote, stdin_w, stdout_r, stderr_r) - - return nil -} - // Upload uses docker exec to copy the file from the host to the container func (c *WindowsContainerCommunicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error { // Create a temporary file to store the upload @@ -225,14 +178,3 @@ func (c *WindowsContainerCommunicator) Download(src string, dst io.Writer) error return nil } - -func (c *WindowsContainerCommunicator) DownloadDir(src string, dst string, exclude []string) error { - return fmt.Errorf("DownloadDir is not implemented for docker") -} - -// Runs the given command and blocks until completion -func (c *WindowsContainerCommunicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin io.WriteCloser, stdout, stderr io.ReadCloser) { - // For Docker, remote communication must be serialized since it - // only supports single execution. 
- c.Communicator.run(cmd, remote, stdin, stdout, stderr) -} From 6407a579f0b4b5a0502f5062a2733f75ebeb1946 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 29 Mar 2019 11:21:07 -0700 Subject: [PATCH 10/47] Document why we need windows communicator in code --- builder/docker/windows_container_communicator.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/builder/docker/windows_container_communicator.go b/builder/docker/windows_container_communicator.go index dbb74f406..3db7bbfbb 100644 --- a/builder/docker/windows_container_communicator.go +++ b/builder/docker/windows_container_communicator.go @@ -12,6 +12,14 @@ import ( "github.com/hashicorp/packer/packer" ) +// Windows containers are a special beast in Docker; you can't use docker cp +// to move files between the container and host. + +// This communicator works around that limitation by reusing all possible +// methods and fields of the normal Docker Communicator, but we overwrite the +// Upload, Download, and UploadDir methods to utilize a mounted directory and +// native powershell commands rather than relying on docker cp. 
+ type WindowsContainerCommunicator struct { Communicator } From af063341142938941f2652c320f99e379dd45b7a Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 29 Mar 2019 11:37:23 -0700 Subject: [PATCH 11/47] fix tests --- builder/docker/step_run_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/builder/docker/step_run_test.go b/builder/docker/step_run_test.go index bc6639319..c76c7c637 100644 --- a/builder/docker/step_run_test.go +++ b/builder/docker/step_run_test.go @@ -52,16 +52,16 @@ func TestStepRun(t *testing.T) { } // Verify we haven't called stop yet - if driver.StopCalled { + if driver.KillCalled { t.Fatal("should not have stopped") } // Cleanup step.Cleanup(state) - if !driver.StopCalled { + if !driver.KillCalled { t.Fatal("should've stopped") } - if driver.StopID != id { + if driver.KillID != id { t.Fatalf("bad: %#v", driver.StopID) } } @@ -85,13 +85,13 @@ func TestStepRun_error(t *testing.T) { } // Verify we haven't called stop yet - if driver.StopCalled { + if driver.KillCalled { t.Fatal("should not have stopped") } // Cleanup step.Cleanup(state) - if driver.StopCalled { + if driver.KillCalled { t.Fatal("should not have stopped") } } From b079d7ba1210872919e8531c6883d23129f56d9f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 29 Mar 2019 13:39:02 -0700 Subject: [PATCH 12/47] add windows container example --- website/source/docs/builders/docker.html.md | 34 +++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/docker.html.md b/website/source/docs/builders/docker.html.md index c7384ae3a..83726aa33 100644 --- a/website/source/docs/builders/docker.html.md +++ b/website/source/docs/builders/docker.html.md @@ -210,8 +210,11 @@ You must specify (only) one of `commit`, `discard`, or `export_path`. - `run_command` (array of strings) - An array of arguments to pass to `docker run` in order to run the container. 
By default this is set to - `["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]`. As you can see, you have a - couple template variables to customize, as well. + `["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"]` if you are + using a linux container, and + `["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"]` if you + are running a windows container. {{.Image}} is a template variable that + corresponds to the `image` template option. - `volumes` (map of strings to strings) - A mapping of additional volumes to mount into this container. The key of the object is the host path, the @@ -338,6 +341,33 @@ nearly-identical sequence definitions, as demonstrated by the example below: +## Docker For Windows + +You should be able to run docker builds against both linux and Windows +containers. Windows containers use a different communicator than linux +containers, because Windows containers cannot use `docker cp`. + +If you are building a Windows container, you must set the template option +`"windows_container": true`. Please note that docker cannot export Windows +containers, so you must either commit or discard them. + +The following is a fully functional template for building a Windows +container. 
+ +``` json +{ + "builders": [ + { + "type": "docker", + "image": "microsoft/windowsservercore:1709", + "container_dir": "c:/app", + "windows_container": true, + "commit": true + } + ] +} +``` + ## Amazon EC2 Container Registry Packer can tag and push images for use in [Amazon EC2 Container From eb274c4e87033e9fc107464e9bed1d3483a2874d Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 1 Apr 2019 17:49:11 +0000 Subject: [PATCH 13/47] Power off before shapshotting --- builder/azure/arm/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 096fa322c..329e8e4f5 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -234,9 +234,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { &packerCommon.StepProvision{}, NewStepGetOSDisk(azureClient, ui), NewStepGetAdditionalDisks(azureClient, ui), + NewStepPowerOffCompute(azureClient, ui), NewStepSnapshotOSDisk(azureClient, ui, b.config), NewStepSnapshotDataDisks(azureClient, ui, b.config), - NewStepPowerOffCompute(azureClient, ui), NewStepCaptureImage(azureClient, ui), NewStepDeleteResourceGroup(azureClient, ui), NewStepDeleteOSDisk(azureClient, ui), From f0295a7ca31c0f78c76b19ed80ab4ec43fd49478 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Tue, 2 Apr 2019 10:15:29 +0200 Subject: [PATCH 14/47] fix tty to avoid panic after resizing term near exit time --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/mattn/go-tty/tty_unix.go | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d713f22fe..bb8a45896 100644 --- a/go.mod +++ b/go.mod @@ -106,7 +106,7 @@ require ( github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c // indirect github.com/masterzen/simplexml v0.0.0-20140219194429-95ba30457eb1 // indirect github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939 - github.com/mattn/go-tty 
v0.0.0-20190322114730-5518497423d1 + github.com/mattn/go-tty v0.0.0-20190402035014-76a2065f1a95 github.com/miekg/dns v1.1.1 // indirect github.com/mitchellh/cli v0.0.0-20170908181043-65fcae5817c8 github.com/mitchellh/copystructure v1.0.0 // indirect diff --git a/go.sum b/go.sum index dd55fe0a6..21204b7dd 100644 --- a/go.sum +++ b/go.sum @@ -285,8 +285,8 @@ github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-tty v0.0.0-20190322114730-5518497423d1 h1:VRq8MIkqXRI9aytxCxOTDiQorwXRt8vlSOapwx8Ubys= -github.com/mattn/go-tty v0.0.0-20190322114730-5518497423d1/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/go-tty v0.0.0-20190402035014-76a2065f1a95 h1:zyW7ieMaS0tXk1KgXxFgQ+HAMmxYpwe7MiAzE4C/rkE= +github.com/mattn/go-tty v0.0.0-20190402035014-76a2065f1a95/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= diff --git a/vendor/github.com/mattn/go-tty/tty_unix.go b/vendor/github.com/mattn/go-tty/tty_unix.go index 579df5c2e..2a4350cca 100644 --- a/vendor/github.com/mattn/go-tty/tty_unix.go +++ b/vendor/github.com/mattn/go-tty/tty_unix.go @@ -52,6 +52,7 @@ func open() (*TTY, error) { tty.ss = make(chan os.Signal, 1) signal.Notify(tty.ss, syscall.SIGWINCH) go func() { + defer close(tty.ws) for sig := range tty.ss { switch sig { case syscall.SIGWINCH: @@ -78,8 +79,8 @@ func (tty *TTY) readRune() (rune, error) { } func (tty *TTY) close() 
error { + signal.Stop(tty.ss) close(tty.ss) - close(tty.ws) _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(tty.in.Fd()), ioctlWriteTermios, uintptr(unsafe.Pointer(&tty.termios)), 0, 0, 0) return err } From 12fc1fa751175411b83b8f278188840cf540273b Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 2 Apr 2019 16:51:58 -0700 Subject: [PATCH 15/47] default_keep_input_artifact --- packer/build.go | 24 +++++++++-- packer/plugin/post_processor.go | 2 +- packer/post_processor.go | 8 +++- packer/post_processor_mock.go | 11 ++--- packer/rpc/post_processor.go | 28 +++++++------ .../alicloud-import/post-processor.go | 42 +++++++++---------- .../amazon-import/post-processor.go | 38 ++++++++--------- post-processor/artifice/post-processor.go | 4 +- post-processor/checksum/post-processor.go | 16 +++---- post-processor/compress/post-processor.go | 20 ++++----- .../digitalocean-import/post-processor.go | 18 ++++---- .../docker-import/post-processor.go | 8 ++-- post-processor/docker-push/post-processor.go | 12 +++--- post-processor/docker-save/post-processor.go | 10 ++--- post-processor/docker-tag/post-processor.go | 10 +++-- .../googlecompute-export/post-processor.go | 12 +++--- .../googlecompute-import/post-processor.go | 16 +++---- post-processor/manifest/post-processor.go | 14 ++++--- post-processor/shell-local/post-processor.go | 6 +-- .../vagrant-cloud/post-processor.go | 12 +++--- post-processor/vagrant/post-processor.go | 13 ++++-- .../vsphere-template/post-processor.go | 12 +++--- post-processor/vsphere/post-processor.go | 10 ++--- template/template.go | 4 +- 24 files changed, 193 insertions(+), 157 deletions(-) diff --git a/packer/build.go b/packer/build.go index bc78c4f0f..d2725035d 100644 --- a/packer/build.go +++ b/packer/build.go @@ -109,7 +109,7 @@ type coreBuildPostProcessor struct { processor PostProcessor processorType string config map[string]interface{} - keepInputArtifact bool + keepInputArtifact *bool } // Keeps track of the provisioner and the 
configuration of the provisioner @@ -262,7 +262,7 @@ PostProcessorRunSeqLoop: builderUi.Say(fmt.Sprintf("Running post-processor: %s", corePP.processorType)) ts := CheckpointReporter.AddSpan(corePP.processorType, "post-processor", corePP.config) - artifact, keep, err := corePP.processor.PostProcess(ppUi, priorArtifact) + artifact, defaultKeep, forceOverride, err := corePP.processor.PostProcess(ppUi, priorArtifact) ts.End(err) if err != nil { errors = append(errors, fmt.Errorf("Post-processor failed: %s", err)) @@ -274,7 +274,23 @@ PostProcessorRunSeqLoop: continue PostProcessorRunSeqLoop } - keep = keep || corePP.keepInputArtifact + keep := defaultKeep + // When nil, go for the default. If overridden by user, use that + // instead. + // Exception: for postprocessors that will fail/become + // useless if keep isn't set, force an override that still uses + // post-processor preference instead of user preference. + if corePP.keepInputArtifact != nil { + if *corePP.keepInputArtifact == false && forceOverride { + log.Printf("The %s post-processor forces "+ + "keep_input_artifact=true to preserve integrity of the"+ + "build chain. User-set keep_input_artifact=false will be"+ + "ignored.", corePP.processorType) + } else { + // User overrides default + keep = *corePP.keepInputArtifact + } + } if i == 0 { // This is the first post-processor. 
We handle deleting // previous artifacts a bit different because multiple @@ -314,7 +330,7 @@ PostProcessorRunSeqLoop: } else { log.Printf("Deleting original artifact for build '%s'", b.name) if err := builderArtifact.Destroy(); err != nil { - errors = append(errors, fmt.Errorf("Error destroying builder artifact: %s", err)) + errors = append(errors, fmt.Errorf("Error destroying builder artifact: %s; bad artifact: %#v", err, builderArtifact.Files())) } } diff --git a/packer/plugin/post_processor.go b/packer/plugin/post_processor.go index b65c76623..45ca80977 100644 --- a/packer/plugin/post_processor.go +++ b/packer/plugin/post_processor.go @@ -20,7 +20,7 @@ func (c *cmdPostProcessor) Configure(config ...interface{}) error { return c.p.Configure(config...) } -func (c *cmdPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, error) { +func (c *cmdPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, bool, error) { defer func() { r := recover() c.checkExit(r, nil) diff --git a/packer/post_processor.go b/packer/post_processor.go index 3def4a0dd..2158325f3 100644 --- a/packer/post_processor.go +++ b/packer/post_processor.go @@ -13,6 +13,10 @@ type PostProcessor interface { // PostProcess takes a previously created Artifact and produces another // Artifact. If an error occurs, it should return that error. If `keep` - // is to true, then the previous artifact is forcibly kept. - PostProcess(Ui, Artifact) (a Artifact, keep bool, err error) + // is true, then the previous artifact defaults to being kept if + // user has not given a value to keep_input_artifact. If forceOverride + // is true, then any user input for keep_input_artifact is ignored and + // the artifact is either kept or discarded according to the value set in + // `keep`. 
+ PostProcess(Ui, Artifact) (a Artifact, keep bool, forceOverride bool, err error) } diff --git a/packer/post_processor_mock.go b/packer/post_processor_mock.go index 591e4b876..f1273ee03 100644 --- a/packer/post_processor_mock.go +++ b/packer/post_processor_mock.go @@ -3,9 +3,10 @@ package packer // MockPostProcessor is an implementation of PostProcessor that can be // used for tests. type MockPostProcessor struct { - ArtifactId string - Keep bool - Error error + ArtifactId string + Keep bool + ForceOverride bool + Error error ConfigureCalled bool ConfigureConfigs []interface{} @@ -22,12 +23,12 @@ func (t *MockPostProcessor) Configure(configs ...interface{}) error { return t.ConfigureError } -func (t *MockPostProcessor) PostProcess(ui Ui, a Artifact) (Artifact, bool, error) { +func (t *MockPostProcessor) PostProcess(ui Ui, a Artifact) (Artifact, bool, bool, error) { t.PostProcessCalled = true t.PostProcessArtifact = a t.PostProcessUi = ui return &MockArtifact{ IdValue: t.ArtifactId, - }, t.Keep, t.Error + }, t.Keep, t.ForceOverride, t.Error } diff --git a/packer/rpc/post_processor.go b/packer/rpc/post_processor.go index 0107558de..429470122 100644 --- a/packer/rpc/post_processor.go +++ b/packer/rpc/post_processor.go @@ -25,9 +25,10 @@ type PostProcessorConfigureArgs struct { } type PostProcessorProcessResponse struct { - Err *BasicError - Keep bool - StreamId uint32 + Err *BasicError + Keep bool + ForceOverride bool + StreamId uint32 } func (p *postProcessor) Configure(raw ...interface{}) (err error) { @@ -39,7 +40,7 @@ func (p *postProcessor) Configure(raw ...interface{}) (err error) { return } -func (p *postProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, error) { +func (p *postProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, bool, error) { nextId := p.mux.NextId() server := newServerWithMux(p.mux, nextId) server.RegisterArtifact(a) @@ -48,23 +49,23 @@ func (p *postProcessor) PostProcess(ui packer.Ui, 
a packer.Artifact) (packer.Art var response PostProcessorProcessResponse if err := p.client.Call("PostProcessor.PostProcess", nextId, &response); err != nil { - return nil, false, err + return nil, false, false, err } if response.Err != nil { - return nil, false, response.Err + return nil, false, false, response.Err } if response.StreamId == 0 { - return nil, false, nil + return nil, false, false, nil } client, err := newClientWithMux(p.mux, response.StreamId) if err != nil { - return nil, false, err + return nil, false, false, err } - return client.Artifact(), response.Keep, nil + return client.Artifact(), response.Keep, response.ForceOverride, nil } func (p *PostProcessorServer) Configure(args *PostProcessorConfigureArgs, reply *interface{}) error { @@ -80,7 +81,7 @@ func (p *PostProcessorServer) PostProcess(streamId uint32, reply *PostProcessorP defer client.Close() streamId = 0 - artifactResult, keep, err := p.p.PostProcess(client.Ui(), client.Artifact()) + artifactResult, keep, forceOverride, err := p.p.PostProcess(client.Ui(), client.Artifact()) if err == nil && artifactResult != nil { streamId = p.mux.NextId() server := newServerWithMux(p.mux, streamId) @@ -89,9 +90,10 @@ func (p *PostProcessorServer) PostProcess(streamId uint32, reply *PostProcessorP } *reply = PostProcessorProcessResponse{ - Err: NewBasicError(err), - Keep: keep, - StreamId: streamId, + Err: NewBasicError(err), + Keep: keep, + ForceOverride: forceOverride, + StreamId: streamId, } return nil diff --git a/post-processor/alicloud-import/post-processor.go b/post-processor/alicloud-import/post-processor.go index 57344321c..ccc752f1b 100644 --- a/post-processor/alicloud-import/post-processor.go +++ b/post-processor/alicloud-import/post-processor.go @@ -118,13 +118,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui 
packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { var err error // Render this key since we didn't in the configure phase p.config.OSSKey, err = interpolate.Render(p.config.OSSKey, &p.config.ctx) if err != nil { - return nil, false, fmt.Errorf("Error rendering oss_key_name template: %s", err) + return nil, false, false, fmt.Errorf("Error rendering oss_key_name template: %s", err) } if p.config.OSSKey == "" { p.config.OSSKey = "Packer_" + strconv.Itoa(time.Now().Nanosecond()) @@ -143,12 +143,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Hope we found something useful if source == "" { - return nil, false, fmt.Errorf("No vhd or raw file found in artifact from builder") + return nil, false, false, fmt.Errorf("No vhd or raw file found in artifact from builder") } ecsClient, err := p.config.AlicloudAccessConfig.Client() if err != nil { - return nil, false, fmt.Errorf("Failed to connect alicloud ecs %s", err) + return nil, false, false, fmt.Errorf("Failed to connect alicloud ecs %s", err) } ecsClient.SetBusinessInfo(BUSINESSINFO) @@ -157,12 +157,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ImageName: p.config.AlicloudImageName, }) if err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } if len(images) > 0 && !p.config.AlicloudImageForceDelete { - return nil, false, fmt.Errorf("Duplicated image exists, please delete the existing images " + + return nil, false, false, fmt.Errorf("Duplicated image exists, please delete the existing images " + "or set the 'image_force_delete' value as true") } @@ -171,25 +171,25 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac client, err := oss.New(getEndPonit(p.config.AlicloudRegion), p.config.AlicloudAccessKey, 
p.config.AlicloudSecretKey) if err != nil { - return nil, false, fmt.Errorf("Creating oss connection failed: %s", err) + return nil, false, false, fmt.Errorf("Creating oss connection failed: %s", err) } bucket, err := queryOrCreateBucket(p.config.OSSBucket, client) if err != nil { - return nil, false, fmt.Errorf("Failed to query or create bucket %s: %s", p.config.OSSBucket, err) + return nil, false, false, fmt.Errorf("Failed to query or create bucket %s: %s", p.config.OSSBucket, err) } if err != nil { - return nil, false, fmt.Errorf("Failed to open %s: %s", source, err) + return nil, false, false, fmt.Errorf("Failed to open %s: %s", source, err) } err = bucket.PutObjectFromFile(p.config.OSSKey, source) if err != nil { - return nil, false, fmt.Errorf("Failed to upload image %s: %s", source, err) + return nil, false, false, fmt.Errorf("Failed to upload image %s: %s", source, err) } if len(images) > 0 && p.config.AlicloudImageForceDelete { if err = ecsClient.DeleteImage(packercommon.Region(p.config.AlicloudRegion), images[0].ImageId); err != nil { - return nil, false, fmt.Errorf("Delete duplicated image %s failed", images[0].ImageName) + return nil, false, false, fmt.Errorf("Delete duplicated image %s failed", images[0].ImageName) } } @@ -221,7 +221,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac RoleName: "AliyunECSImageImportDefaultRole", }) if err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } if roleResponse.Role.RoleId == "" { @@ -229,7 +229,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac RoleName: "AliyunECSImageImportDefaultRole", AssumeRolePolicyDocument: AliyunECSImageImportDefaultRolePolicy, }); err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, 
fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } if _, err := ramClient.AttachPolicyToRole(ram.AttachPolicyToRoleRequest{ @@ -239,7 +239,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }, RoleName: "AliyunECSImageImportDefaultRole", }); err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } } else { @@ -247,7 +247,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac RoleName: "AliyunECSImageImportDefaultRole", }) if err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } isAliyunECSImageImportRolePolicyNotExit := true @@ -266,7 +266,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }, RoleName: "AliyunECSImageImportDefaultRole", }); err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } } @@ -275,7 +275,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac RoleName: "AliyunECSImageImportDefaultRole", NewAssumeRolePolicyDocument: AliyunECSImageImportDefaultRolePolicy, }); err != nil { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } } @@ -290,7 +290,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac e.Code == "InvalidImageName.Duplicated" { break } - return nil, false, 
fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } break @@ -298,7 +298,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } else { - return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err) } } @@ -319,12 +319,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Deleting import source %s/%s/%s", getEndPonit(p.config.AlicloudRegion), p.config.OSSBucket, p.config.OSSKey)) if err = bucket.DeleteObject(p.config.OSSKey); err != nil { - return nil, false, fmt.Errorf("Failed to delete %s/%s/%s: %s", + return nil, false, false, fmt.Errorf("Failed to delete %s/%s/%s: %s", getEndPonit(p.config.AlicloudRegion), p.config.OSSBucket, p.config.OSSKey, err) } } - return artifact, false, nil + return artifact, false, false, nil } func queryOrCreateBucket(bucketName string, client *oss.Client) (*oss.Bucket, error) { diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 2fbab390d..c04082f14 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -119,19 +119,19 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { var err error session, err := p.config.Session() if err != nil { - return nil, false, err + return nil, false, false, err } config := session.Config // Render this key since we didn't in the configure phase p.config.S3Key, err = 
interpolate.Render(p.config.S3Key, &p.config.ctx) if err != nil { - return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err) + return nil, false, false, fmt.Errorf("Error rendering s3_key_name template: %s", err) } log.Printf("Rendered s3_key_name as %s", p.config.S3Key) @@ -147,7 +147,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Hope we found something useful if source == "" { - return nil, false, fmt.Errorf("No %s image file found in artifact from builder", p.config.Format) + return nil, false, false, fmt.Errorf("No %s image file found in artifact from builder", p.config.Format) } if p.config.S3Encryption == "AES256" && p.config.S3EncryptionKey != "" { @@ -158,7 +158,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac log.Printf("Opening file %s to upload", source) file, err := os.Open(source) if err != nil { - return nil, false, fmt.Errorf("Failed to open %s: %s", source, err) + return nil, false, false, fmt.Errorf("Failed to open %s: %s", source, err) } ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) @@ -181,7 +181,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Copy the image file into the S3 bucket specified uploader := s3manager.NewUploader(session) if _, err = uploader.Upload(updata); err != nil { - return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err) + return nil, false, false, fmt.Errorf("Failed to upload %s: %s", source, err) } // May as well stop holding this open now @@ -222,7 +222,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac import_start, err := ec2conn.ImportImage(params) if err != nil { - return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) + return nil, false, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, 
p.config.S3Key, err) } ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId)) @@ -244,7 +244,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac if err2 == nil { statusMessage = *import_result.ImportImageTasks[0].StatusMessage } - return nil, false, fmt.Errorf("Import task %s failed with status message: %s, error: %s", *import_start.ImportTaskId, statusMessage, err) + return nil, false, false, fmt.Errorf("Import task %s failed with status message: %s, error: %s", *import_start.ImportTaskId, statusMessage, err) } // Retrieve what the outcome was for the import task @@ -255,12 +255,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err) + return nil, false, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err) } // Check it was actually completed if *import_result.ImportImageTasks[0].Status != "completed" { // The most useful error message is from the job itself - return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage) + return nil, false, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage) } ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId)) @@ -279,13 +279,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("Error Copying AMI (%s): %s", createdami, err) + return nil, false, false, fmt.Errorf("Error Copying AMI (%s): %s", createdami, err) } ui.Message(fmt.Sprintf("Waiting for AMI rename to complete (may take a while)")) if err := awscommon.WaitUntilAMIAvailable(aws.BackgroundContext(), ec2conn, *resp.ImageId); 
err != nil { - return nil, false, fmt.Errorf("Error waiting for AMI (%s): %s", *resp.ImageId, err) + return nil, false, false, fmt.Errorf("Error waiting for AMI (%s): %s", *resp.ImageId, err) } _, err = ec2conn.DeregisterImage(&ec2.DeregisterImageInput{ @@ -293,7 +293,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("Error deregistering existing AMI: %s", err) + return nil, false, false, fmt.Errorf("Error deregistering existing AMI: %s", err) } ui.Message(fmt.Sprintf("AMI rename completed")) @@ -325,11 +325,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err) + return nil, false, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err) } if len(imageResp.Images) == 0 { - return nil, false, fmt.Errorf("AMI %s has no images", createdami) + return nil, false, false, fmt.Errorf("AMI %s has no images", createdami) } image := imageResp.Images[0] @@ -351,7 +351,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err) + return nil, false, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err) } } @@ -405,7 +405,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac input.ImageId = &createdami _, err := ec2conn.ModifyImageAttribute(input) if err != nil { - return nil, false, fmt.Errorf("Error modifying AMI attributes: %s", err) + return nil, false, false, fmt.Errorf("Error modifying AMI attributes: %s", err) } } } @@ -428,9 +428,9 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac Key: &p.config.S3Key, }) if err != nil { - return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", 
p.config.S3Bucket, p.config.S3Key, err) + return nil, false, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) } } - return artifact, false, nil + return artifact, false, false, nil } diff --git a/post-processor/artifice/post-processor.go b/post-processor/artifice/post-processor.go index 030b52c07..99dbb8beb 100644 --- a/post-processor/artifice/post-processor.go +++ b/post-processor/artifice/post-processor.go @@ -48,7 +48,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if len(artifact.Files()) > 0 { ui.Say(fmt.Sprintf("Discarding artifact files: %s", strings.Join(artifact.Files(), ", "))) } @@ -56,5 +56,5 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac artifact, err := NewArtifact(p.config.Files) ui.Say(fmt.Sprintf("Using these artifact files: %s", strings.Join(artifact.Files(), ", "))) - return artifact, true, err + return artifact, true, false, err } diff --git a/post-processor/checksum/post-processor.go b/post-processor/checksum/post-processor.go index 70295c878..6c4e9b66a 100644 --- a/post-processor/checksum/post-processor.go +++ b/post-processor/checksum/post-processor.go @@ -95,7 +95,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { files := artifact.Files() var h hash.Hash @@ -113,29 +113,29 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac for _, art := range files { checksumFile, err := interpolate.Render(p.config.OutputPath, 
&p.config.ctx) if err != nil { - return nil, false, err + return nil, false, true, err } if _, err := os.Stat(checksumFile); err != nil { newartifact.files = append(newartifact.files, checksumFile) } if err := os.MkdirAll(filepath.Dir(checksumFile), os.FileMode(0755)); err != nil { - return nil, false, fmt.Errorf("unable to create dir: %s", err.Error()) + return nil, false, true, fmt.Errorf("unable to create dir: %s", err.Error()) } fw, err := os.OpenFile(checksumFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(0644)) if err != nil { - return nil, false, fmt.Errorf("unable to create file %s: %s", checksumFile, err.Error()) + return nil, false, true, fmt.Errorf("unable to create file %s: %s", checksumFile, err.Error()) } fr, err := os.Open(art) if err != nil { fw.Close() - return nil, false, fmt.Errorf("unable to open file %s: %s", art, err.Error()) + return nil, false, true, fmt.Errorf("unable to open file %s: %s", art, err.Error()) } if _, err = io.Copy(h, fr); err != nil { fr.Close() fw.Close() - return nil, false, fmt.Errorf("unable to compute %s hash for %s", ct, art) + return nil, false, true, fmt.Errorf("unable to compute %s hash for %s", ct, art) } fr.Close() fw.WriteString(fmt.Sprintf("%x\t%s\n", h.Sum(nil), filepath.Base(art))) @@ -144,5 +144,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } } - return newartifact, true, nil + // sets keep and forceOverride to true because we don't want to accidentally + // delete the very artifact we're checksumming. 
+ return newartifact, true, true, nil } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 5ac213a76..4d19d6bfd 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -100,7 +100,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { // These are extra variables that will be made available for interpolation. p.config.ctx.Data = map[string]string{ @@ -110,7 +110,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx) if err != nil { - return nil, false, fmt.Errorf("Error interpolating output value: %s", err) + return nil, false, false, fmt.Errorf("Error interpolating output value: %s", err) } else { fmt.Println(target) } @@ -119,12 +119,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac newArtifact := &Artifact{Path: target} if err = os.MkdirAll(filepath.Dir(target), os.FileMode(0755)); err != nil { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Unable to create dir for archive %s: %s", target, err) } outputFile, err := os.Create(target) if err != nil { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Unable to create archive %s: %s", target, err) } defer outputFile.Close() @@ -168,19 +168,19 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression)) err = createTarArchive(artifact.Files(), output) if err != nil { - return nil, keep, fmt.Errorf("Error creating tar: %s", err) + return nil, keep, false, fmt.Errorf("Error creating tar: %s", err) } 
case "zip": ui.Say(fmt.Sprintf("Zipping %s", target)) err = createZipArchive(artifact.Files(), output) if err != nil { - return nil, keep, fmt.Errorf("Error creating zip: %s", err) + return nil, keep, false, fmt.Errorf("Error creating zip: %s", err) } default: // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { - return nil, keep, fmt.Errorf( + return nil, keep, false, fmt.Errorf( "Can only have 1 input file when not using tar/zip. Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } @@ -189,21 +189,21 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac source, err := os.Open(archiveFile) if err != nil { - return nil, keep, fmt.Errorf( + return nil, keep, false, fmt.Errorf( "Failed to open source file %s for reading: %s", archiveFile, err) } defer source.Close() if _, err = io.Copy(output, source); err != nil { - return nil, keep, fmt.Errorf("Failed to compress %s: %s", + return nil, keep, false, fmt.Errorf("Failed to compress %s: %s", archiveFile, err) } } ui.Say(fmt.Sprintf("Archive %s completed", target)) - return newArtifact, keep, nil + return newArtifact, keep, false, nil } func (config *Config) detectFromFilename() { diff --git a/post-processor/digitalocean-import/post-processor.go b/post-processor/digitalocean-import/post-processor.go index d7dcd867b..70d1fe44d 100644 --- a/post-processor/digitalocean-import/post-processor.go +++ b/post-processor/digitalocean-import/post-processor.go @@ -136,12 +136,12 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { var err error p.config.ObjectName, err = interpolate.Render(p.config.ObjectName, &p.config.ctx) if err != nil { - return nil, false, 
fmt.Errorf("Error rendering space_object_name template: %s", err) + return nil, false, false, fmt.Errorf("Error rendering space_object_name template: %s", err) } log.Printf("Rendered space_object_name as %s", p.config.ObjectName) @@ -166,7 +166,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } if source == "" { - return nil, false, fmt.Errorf("Image file not found") + return nil, false, false, fmt.Errorf("Image file not found") } spacesCreds := credentials.NewStaticCredentials(p.config.SpacesKey, p.config.SpacesSecret, "") @@ -185,7 +185,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Uploading %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName)) err = uploadImageToSpaces(source, p, sess) if err != nil { - return nil, false, err + return nil, false, false, err } ui.Message(fmt.Sprintf("Completed upload of %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName)) @@ -196,13 +196,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Started import of spaces://%s/%s", p.config.SpaceName, p.config.ObjectName)) image, err := importImageFromSpaces(p, client) if err != nil { - return nil, false, err + return nil, false, false, err } ui.Message(fmt.Sprintf("Waiting for import of image %s to complete (may take a while)", p.config.Name)) err = waitUntilImageAvailable(client, image.ID, p.config.Timeout) if err != nil { - return nil, false, fmt.Errorf("Import of image %s failed with error: %s", p.config.Name, err) + return nil, false, false, fmt.Errorf("Import of image %s failed with error: %s", p.config.Name, err) } ui.Message(fmt.Sprintf("Import of image %s complete", p.config.Name)) @@ -216,7 +216,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Distributing image %s to additional regions: %v", p.config.Name, regions)) err = 
distributeImageToRegions(client, image.ID, regions, p.config.Timeout) if err != nil { - return nil, false, err + return nil, false, false, err } } @@ -232,11 +232,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Deleting import source spaces://%s/%s", p.config.SpaceName, p.config.ObjectName)) err = deleteImageFromSpaces(p, sess) if err != nil { - return nil, false, err + return nil, false, false, err } } - return artifact, false, nil + return artifact, false, false, nil } func uploadImageToSpaces(source string, p *PostProcessor, s *session.Session) (err error) { diff --git a/post-processor/docker-import/post-processor.go b/post-processor/docker-import/post-processor.go index eac920f06..0c74c53a2 100644 --- a/post-processor/docker-import/post-processor.go +++ b/post-processor/docker-import/post-processor.go @@ -43,7 +43,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { switch artifact.BuilderId() { case docker.BuilderId, artifice.BuilderId: break @@ -51,7 +51,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac err := fmt.Errorf( "Unknown artifact type: %s\nCan only import from Docker builder and Artifice post-processor artifacts.", artifact.BuilderId()) - return nil, false, err + return nil, false, false, err } importRepo := p.config.Repository @@ -65,7 +65,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message("Repository: " + importRepo) id, err := driver.Import(artifact.Files()[0], p.config.Changes, importRepo) if err != nil { - return nil, false, err + return nil, false, false, err } ui.Message("Imported ID: " + id) @@ -77,5 +77,5 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, 
artifact packer.Artifact) (pac IdValue: importRepo, } - return artifact, false, nil + return artifact, false, false, nil } diff --git a/post-processor/docker-push/post-processor.go b/post-processor/docker-push/post-processor.go index 2ffc3e5a5..92a9bc61a 100644 --- a/post-processor/docker-push/post-processor.go +++ b/post-processor/docker-push/post-processor.go @@ -51,13 +51,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if artifact.BuilderId() != dockerimport.BuilderId && artifact.BuilderId() != dockertag.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only import from docker-import and docker-tag artifacts.", artifact.BuilderId()) - return nil, false, err + return nil, false, false, err } driver := p.Driver @@ -71,7 +71,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac username, password, err := p.config.EcrGetLogin(p.config.LoginServer) if err != nil { - return nil, false, err + return nil, false, false, err } p.config.LoginUsername = username @@ -85,7 +85,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac p.config.LoginUsername, p.config.LoginPassword) if err != nil { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Error logging in to Docker: %s", err) } @@ -102,7 +102,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message("Pushing: " + name) if err := driver.Push(name); err != nil { - return nil, false, err + return nil, false, false, err } artifact = &docker.ImportArtifact{ @@ -111,5 +111,5 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac IdValue: name, } - return artifact, true, nil + return artifact, true, false, 
nil } diff --git a/post-processor/docker-save/post-processor.go b/post-processor/docker-save/post-processor.go index 6cb348bf8..47eef8250 100644 --- a/post-processor/docker-save/post-processor.go +++ b/post-processor/docker-save/post-processor.go @@ -45,13 +45,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if artifact.BuilderId() != dockerimport.BuilderId && artifact.BuilderId() != dockertag.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only save Docker builder artifacts.", artifact.BuilderId()) - return nil, false, err + return nil, false, false, err } path := p.config.Path @@ -60,7 +60,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac f, err := os.Create(path) if err != nil { err := fmt.Errorf("Error creating output file: %s", err) - return nil, false, err + return nil, false, false, err } driver := p.Driver @@ -75,11 +75,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac f.Close() os.Remove(f.Name()) - return nil, false, err + return nil, false, false, err } f.Close() ui.Message("Saved to: " + path) - return artifact, true, nil + return artifact, true, false, nil } diff --git a/post-processor/docker-tag/post-processor.go b/post-processor/docker-tag/post-processor.go index e9311d12f..d915f6a66 100644 --- a/post-processor/docker-tag/post-processor.go +++ b/post-processor/docker-tag/post-processor.go @@ -45,13 +45,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if artifact.BuilderId() != 
BuilderId && artifact.BuilderId() != dockerimport.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only tag from Docker builder artifacts.", artifact.BuilderId()) - return nil, false, err + return nil, false, true, err } driver := p.Driver @@ -69,7 +69,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message("Repository: " + importRepo) err := driver.TagImage(artifact.Id(), importRepo, p.config.Force) if err != nil { - return nil, false, err + return nil, false, true, err } // Build the artifact @@ -79,5 +79,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac IdValue: importRepo, } - return artifact, true, nil + // If we tag an image and then delete it, there was no point in creating the + // tag. Override users to force us to always keep the input artifact. + return artifact, true, true, nil } diff --git a/post-processor/googlecompute-export/post-processor.go b/post-processor/googlecompute-export/post-processor.go index de56eb7cb..0d11b0c85 100644 --- a/post-processor/googlecompute-export/post-processor.go +++ b/post-processor/googlecompute-export/post-processor.go @@ -75,12 +75,12 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if artifact.BuilderId() != googlecompute.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only export from Google Compute Engine builder artifacts.", artifact.BuilderId()) - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } builderAccountFile := artifact.State("AccountFilePath").(string) @@ -98,13 +98,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac if builderAccountFile != "" { err := 
googlecompute.ProcessAccountFile(&p.config.Account, builderAccountFile) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } } if p.config.AccountFile != "" { err := googlecompute.ProcessAccountFile(&p.config.Account, p.config.AccountFile) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } } @@ -141,7 +141,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac driver, err := googlecompute.NewDriverGCE(ui, builderProjectId, &p.config.Account) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } // Set up the state. @@ -169,5 +169,5 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac result := &Artifact{paths: p.config.Paths} - return result, p.config.KeepOriginalImage, nil + return result, p.config.KeepOriginalImage, false, nil } diff --git a/post-processor/googlecompute-import/post-processor.go b/post-processor/googlecompute-import/post-processor.go index 4dfb1fee9..9ff930f1c 100644 --- a/post-processor/googlecompute-import/post-processor.go +++ b/post-processor/googlecompute-import/post-processor.go @@ -94,42 +94,42 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { client, err := googlecompute.NewClientGCE(&p.config.Account) if err != nil { - return nil, false, err + return nil, false, false, err } if artifact.BuilderId() != compress.BuilderId { err = fmt.Errorf( "incompatible artifact type: %s\nCan only import from Compress post-processor artifacts", artifact.BuilderId()) - return nil, false, err + return nil, false, false, err } p.config.GCSObjectName, err = 
interpolate.Render(p.config.GCSObjectName, &p.config.ctx) if err != nil { - return nil, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err) + return nil, false, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err) } rawImageGcsPath, err := UploadToBucket(client, ui, artifact, p.config.Bucket, p.config.GCSObjectName) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } gceImageArtifact, err := CreateGceImage(client, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels, p.config.ImageGuestOsFeatures) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } if !p.config.SkipClean { err = DeleteFromBucket(client, ui, p.config.Bucket, p.config.GCSObjectName) if err != nil { - return nil, p.config.KeepOriginalImage, err + return nil, p.config.KeepOriginalImage, false, err } } - return gceImageArtifact, p.config.KeepOriginalImage, nil + return gceImageArtifact, p.config.KeepOriginalImage, false, nil } func UploadToBucket(client *http.Client, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) { diff --git a/post-processor/manifest/post-processor.go b/post-processor/manifest/post-processor.go index 8833f2abe..5611ef4ab 100644 --- a/post-processor/manifest/post-processor.go +++ b/post-processor/manifest/post-processor.go @@ -56,7 +56,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packer.Artifact, bool, bool, error) { artifact := &Artifact{} var err error @@ -106,14 +106,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packe // Read the current manifest 
file from disk contents := []byte{} if contents, err = ioutil.ReadFile(p.config.OutputPath); err != nil && !os.IsNotExist(err) { - return source, true, fmt.Errorf("Unable to open %s for reading: %s", p.config.OutputPath, err) + return source, true, true, fmt.Errorf("Unable to open %s for reading: %s", p.config.OutputPath, err) } // Parse the manifest file JSON, if we have one manifestFile := &ManifestFile{} if len(contents) > 0 { if err = json.Unmarshal(contents, manifestFile); err != nil { - return source, true, fmt.Errorf("Unable to parse content from %s: %s", p.config.OutputPath, err) + return source, true, true, fmt.Errorf("Unable to parse content from %s: %s", p.config.OutputPath, err) } } @@ -130,11 +130,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packe // Write JSON to disk if out, err := json.MarshalIndent(manifestFile, "", " "); err == nil { if err = ioutil.WriteFile(p.config.OutputPath, out, 0664); err != nil { - return source, true, fmt.Errorf("Unable to write %s: %s", p.config.OutputPath, err) + return source, true, true, fmt.Errorf("Unable to write %s: %s", p.config.OutputPath, err) } } else { - return source, true, fmt.Errorf("Unable to marshal JSON %s", err) + return source, true, true, fmt.Errorf("Unable to marshal JSON %s", err) } - return source, true, nil + // The manifest should never delete the artifacts it is set to record, so it + // forcibly sets "keep" to true. 
+ return source, true, true, nil } diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index c761a19f4..a4b53f95b 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -35,14 +35,14 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return sl.Validate(&p.config) } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { // this particular post-processor doesn't do anything with the artifact // except to return it. retBool, retErr := sl.Run(ui, &p.config) if !retBool { - return nil, retBool, retErr + return nil, retBool, false, retErr } - return artifact, retBool, retErr + return artifact, retBool, false, retErr } diff --git a/post-processor/vagrant-cloud/post-processor.go b/post-processor/vagrant-cloud/post-processor.go index e11e8df8e..ff6ddc646 100644 --- a/post-processor/vagrant-cloud/post-processor.go +++ b/post-processor/vagrant-cloud/post-processor.go @@ -117,15 +117,15 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Unknown artifact type, requires box from vagrant post-processor or vagrant builder: %s", artifact.BuilderId()) } // We assume that there is only one .box file to upload if !strings.HasSuffix(artifact.Files()[0], ".box") { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Unknown files in artifact, vagrant box is required: %s", artifact.Files()) } @@ -142,7 
+142,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } boxDownloadUrl, err := interpolate.Render(p.config.BoxDownloadUrl, &p.config.ctx) if err != nil { - return nil, false, fmt.Errorf("Error processing box_download_url: %s", err) + return nil, false, false, fmt.Errorf("Error processing box_download_url: %s", err) } // Set up the state @@ -181,10 +181,10 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // If there was an error, return that if rawErr, ok := state.GetOk("error"); ok { - return nil, false, rawErr.(error) + return nil, false, false, rawErr.(error) } - return NewArtifact(providerName, p.config.Tag), true, nil + return NewArtifact(providerName, p.config.Tag), true, false, nil } // Runs a cleanup if the post processor fails to upload diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 72c2ab440..29ec857d2 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -158,11 +158,11 @@ func (p *PostProcessor) PostProcessProvider(name string, provider Provider, ui p return NewArtifact(name, outputPath), provider.KeepInputArtifact(), nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { name, ok := builtins[artifact.BuilderId()] if !ok { - return nil, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Unknown artifact type, can't build box: %s", artifact.BuilderId()) } @@ -172,7 +172,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac panic(fmt.Sprintf("bad provider name: %s", name)) } - return p.PostProcessProvider(name, provider, ui, artifact) + artifact, keep, err := p.PostProcessProvider(name, provider, ui, artifact) + + // In some cases, (e.g. 
AMI), deleting the input artifact would render the + // resulting vagrant box useless. Because of these cases, we want to + // forcibly set keep_input_artifact. + + // TODO: rework all provisioners to only forcibly keep those where it matters + return artifact, keep, true, err } func (p *PostProcessor) configureSingle(c *Config, raws ...interface{}) error { diff --git a/post-processor/vsphere-template/post-processor.go b/post-processor/vsphere-template/post-processor.go index 9f0b7f271..908fa7664 100644 --- a/post-processor/vsphere-template/post-processor.go +++ b/post-processor/vsphere-template/post-processor.go @@ -91,9 +91,9 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { - return nil, false, fmt.Errorf("The Packer vSphere Template post-processor "+ + return nil, false, false, fmt.Errorf("The Packer vSphere Template post-processor "+ "can only take an artifact from the VMware-iso builder, built on "+ "ESXi (i.e. remote) or an artifact from the vSphere post-processor. 
"+ "Artifact type %s does not fit this requirement", artifact.BuilderId()) @@ -104,7 +104,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac s := artifact.State(vmwcommon.ArtifactConfSkipExport) if f != "" && k != "true" && s == "false" { - return nil, false, errors.New("To use this post-processor with exporting behavior you need set keep_registered as true") + return nil, false, false, errors.New("To use this post-processor with exporting behavior you need set keep_registered as true") } // In some occasions the VM state is powered on and if we immediately try to mark as template @@ -113,7 +113,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac time.Sleep(10 * time.Second) c, err := govmomi.NewClient(context.Background(), p.url, p.config.Insecure) if err != nil { - return nil, false, fmt.Errorf("Error connecting to vSphere: %s", err) + return nil, false, false, fmt.Errorf("Error connecting to vSphere: %s", err) } defer c.Logout(context.Background()) @@ -135,7 +135,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac runner := common.NewRunnerWithPauseFn(steps, p.config.PackerConfig, ui, state) runner.Run(state) if rawErr, ok := state.GetOk("error"); ok { - return nil, false, rawErr.(error) + return nil, false, false, rawErr.(error) } - return artifact, true, nil + return artifact, true, true, nil } diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index 4102b4138..1f3f5d83d 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -114,9 +114,9 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { return nil } -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) { if _, ok := 
builtins[artifact.BuilderId()]; !ok { - return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) + return nil, false, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } source := "" @@ -128,7 +128,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } if source == "" { - return nil, false, fmt.Errorf("VMX, OVF or OVA file not found") + return nil, false, false, fmt.Errorf("VMX, OVF or OVA file not found") } password := escapeWithSpaces(p.config.Password) @@ -174,14 +174,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac if err := cmd.Run(); err != nil { err := fmt.Errorf("Error uploading virtual machine: %s\n%s\n", err, p.filterLog(errOut.String())) - return nil, false, err + return nil, false, false, err } ui.Message(p.filterLog(errOut.String())) artifact = NewArtifact(p.config.Datastore, p.config.VMFolder, p.config.VMName, artifact.Files()) - return artifact, false, nil + return artifact, false, false, nil } func (p *PostProcessor) filterLog(s string) string { diff --git a/template/template.go b/template/template.go index 6f978a5db..98e8a7eaf 100644 --- a/template/template.go +++ b/template/template.go @@ -111,7 +111,7 @@ type PostProcessor struct { Name string `json:"name,omitempty"` Type string `json:"type"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact" json:"keep_input_artifact,omitempty"` + KeepInputArtifact *bool `mapstructure:"keep_input_artifact" json:"keep_input_artifact,omitempty"` Config map[string]interface{} `json:"config,omitempty"` } @@ -119,7 +119,7 @@ type PostProcessor struct { // to provide valid Packer template JSON func (p *PostProcessor) MarshalJSON() ([]byte, error) { // Early exit for simple definitions - if len(p.Config) == 0 && len(p.OnlyExcept.Only) == 0 && len(p.OnlyExcept.Except) == 0 && !p.KeepInputArtifact { + if len(p.Config) == 0 && len(p.OnlyExcept.Only) == 0 && 
len(p.OnlyExcept.Except) == 0 && p.KeepInputArtifact == nil { return json.Marshal(p.Type) } From d0eb2609a852ff7b80e2f3c93f48da613acb10d5 Mon Sep 17 00:00:00 2001 From: John Jones Date: Tue, 2 Apr 2019 21:17:08 -0700 Subject: [PATCH 16/47] Ansible 2.7 "use_tty" fix Per Issue #6453, Ansible@2.7 targeting Windows was throwing the following error when set up as the docs instruct: "Requested option use_tty was not defined in configuration" This is caused by something in Ansible silently discarding the whole `DOCUMENTATION` string configuration when it can't parse it. The changed line in this PR fixes that. :) --- website/source/docs/provisioners/ansible.html.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/ansible.html.md b/website/source/docs/provisioners/ansible.html.md index 7ec4941c9..b5b68fe94 100644 --- a/website/source/docs/provisioners/ansible.html.md +++ b/website/source/docs/provisioners/ansible.html.md @@ -262,8 +262,7 @@ DOCUMENTATION = ''' connection: packer short_description: ssh based connections for powershell via packer description: - - This connection plugin allows ansible to communicate to the target packer - machines via ssh based connections for powershell. + - This connection plugin allows ansible to communicate to the target packer machines via ssh based connections for powershell. 
author: Packer version_added: na options: From 9836dad0c937dbed2dee0f661f9273912d0d7626 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Wed, 3 Apr 2019 10:27:05 +0200 Subject: [PATCH 17/47] introduce the clean_resource_name to clean image names and deprecate the old ones --- builder/amazon/common/ami_config.go | 2 +- builder/amazon/common/template_funcs.go | 5 +- builder/azure/arm/template_funcs.go | 5 +- builder/googlecompute/template_funcs.go | 5 +- common/template/funcs.go | 19 +++++++ website/source/docs/templates/engine.html.md | 55 ++++++++++++++------ 6 files changed, 71 insertions(+), 20 deletions(-) create mode 100644 common/template/funcs.go diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go index 1c00eee4d..3e858d1f3 100644 --- a/builder/amazon/common/ami_config.go +++ b/builder/amazon/common/ami_config.go @@ -101,7 +101,7 @@ func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context errs = append(errs, fmt.Errorf("AMIName should only contain "+ "alphanumeric characters, parentheses (()), square brackets ([]), spaces "+ "( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs "+ - "(@), or underscores(_). You can use the `clean_ami_name` template "+ + "(@), or underscores(_). 
You can use the `clean_resource_name` template "+ "filter to automatically clean your ami name.")) } diff --git a/builder/amazon/common/template_funcs.go b/builder/amazon/common/template_funcs.go index 7a0998b34..246a288e0 100644 --- a/builder/amazon/common/template_funcs.go +++ b/builder/amazon/common/template_funcs.go @@ -3,6 +3,8 @@ package common import ( "bytes" "text/template" + + packertpl "github.com/hashicorp/packer/common/template" ) func isalphanumeric(b byte) bool { @@ -36,5 +38,6 @@ func templateCleanAMIName(s string) string { } var TemplateFuncs = template.FuncMap{ - "clean_ami_name": templateCleanAMIName, + "clean_resource_name": templateCleanAMIName, + "clean_ami_name": packertpl.DeprecatedTemplateFunc("clean_ami_name", "clean_resource_name", templateCleanAMIName), } diff --git a/builder/azure/arm/template_funcs.go b/builder/azure/arm/template_funcs.go index 7e0d6601f..1aa9546ab 100644 --- a/builder/azure/arm/template_funcs.go +++ b/builder/azure/arm/template_funcs.go @@ -3,6 +3,8 @@ package arm import ( "bytes" "text/template" + + packertpl "github.com/hashicorp/packer/common/template" ) func isValidByteValue(b byte) bool { @@ -39,5 +41,6 @@ func templateCleanImageName(s string) string { } var TemplateFuncs = template.FuncMap{ - "clean_image_name": templateCleanImageName, + "clean_resource_name": templateCleanImageName, + "clean_image_name": packertpl.DeprecatedTemplateFunc("clean_image_name", "clean_resource_name", templateCleanImageName), } diff --git a/builder/googlecompute/template_funcs.go b/builder/googlecompute/template_funcs.go index 1e0c654c2..14d1c4b2f 100644 --- a/builder/googlecompute/template_funcs.go +++ b/builder/googlecompute/template_funcs.go @@ -3,6 +3,8 @@ package googlecompute import ( "strings" "text/template" + + packertpl "github.com/hashicorp/packer/common/template" ) func isalphanumeric(b byte) bool { @@ -34,5 +36,6 @@ func templateCleanImageName(s string) string { } var TemplateFuncs = template.FuncMap{ - 
"clean_image_name": templateCleanImageName, + "clean_resource_name": templateCleanImageName, + "clean_image_name": packertpl.DeprecatedTemplateFunc("clean_image_name", "clean_resource_name", templateCleanImageName), } diff --git a/common/template/funcs.go b/common/template/funcs.go new file mode 100644 index 000000000..bf1de23c6 --- /dev/null +++ b/common/template/funcs.go @@ -0,0 +1,19 @@ +package template + +import ( + "log" + "sync" +) + +// DeprecatedTemplateFunc wraps a template func to warn users that it's +// deprecated. The deprecation warning is called only once. +func DeprecatedTemplateFunc(funcName, useInstead string, deprecated func(string) string) func(string) string { + once := sync.Once{} + return func(in string) string { + once.Do(func() { + log.Printf("[WARN]: the `%s` template func is deprecated, please use %s instead", + funcName, useInstead) + }) + return deprecated(in) + } +} diff --git a/website/source/docs/templates/engine.html.md b/website/source/docs/templates/engine.html.md index 1cebe5fc5..a16182547 100644 --- a/website/source/docs/templates/engine.html.md +++ b/website/source/docs/templates/engine.html.md @@ -27,10 +27,10 @@ The syntax of templates uses the following conventions: Functions perform operations on and within strings, for example the `{{timestamp}}` function can be used in any string to generate the current timestamp. This is useful for configurations that require unique keys, such as -AMI names. By setting the AMI name to something like -`My Packer AMI {{timestamp}}`, the AMI name will be unique down to the second. -If you need greater than one second granularity, you should use `{{uuid}}`, for -example when you have multiple builders in the same template. +AMI names. By setting the AMI name to something like `My Packer AMI +{{timestamp}}`, the AMI name will be unique down to the second. 
If you need +greater than one second granularity, you should use `{{uuid}}`, for example +when you have multiple builders in the same template. Here is a full list of the available functions for reference. @@ -54,18 +54,41 @@ Here is a full list of the available functions for reference. - `upper` - Uppercases the string. - `user` - Specifies a user variable. - `packer_version` - Returns Packer version. +- `clean_resource_name` - Image names can only contain certain characters and + have a maximum length, eg 63 on GCE & 80 on Azure. `clean_resource_name` + will convert upper cases to lower cases and replace illegal characters with + a "-" character. Example: + + `"mybuild-{{isotime | clean_image_name}}"` will become + `mybuild-2017-10-18t02-06-30z`. + + Note: Valid Azure image names must match the regex + `^[^_\\W][\\w-._)]{0,79}$` + + Note: Valid GCE image names must match the regex + `(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)` + + This engine does not guarantee that the final image name will match the + regex; it will not truncate your name if it exceeds the maximum number of + allowed characters, and it will not validate that the beginning and end of + the engine's output are valid. For example, `"image_name": {{isotime | + clean_resource_name}}"` will cause your build to fail because the image + name will start with a number, which is why in the above example we prepend + the isotime with "mybuild". #### Specific to Amazon builders: -- `clean_ami_name` - AMI names can only contain certain characters. This - function will replace illegal characters with a '-" character. Example - usage since ":" is not a legal AMI name is: `{{isotime | clean_ami_name}}`. +- `clean_ami_name` - DEPRECATED use `clean_resource_name` instead - AMI names + can only contain certain characters. This function will replace illegal + characters with a '-" character. Example usage since ":" is not a legal AMI + name is: `{{isotime | clean_ami_name}}`. 
#### Specific to Google Compute builders: -- `clean_image_name` - GCE image names can only contain certain characters - and the maximum length is 63. This function will convert upper cases to - lower cases and replace illegal characters with a "-" character. Example: +- `clean_image_name` - DEPRECATED use `clean_resource_name` instead - GCE + image names can only contain certain characters and the maximum length is + 63. This function will convert upper cases to lower cases and replace + illegal characters with a "-" character. Example: `"mybuild-{{isotime | clean_image_name}}"` will become `mybuild-2017-10-18t02-06-30z`. @@ -82,9 +105,10 @@ Here is a full list of the available functions for reference. #### Specific to Azure builders: -- `clean_image_name` - Azure managed image names can only contain certain - characters and the maximum length is 80. This function will replace illegal - characters with a "-" character. Example: +- `clean_image_name` - DEPRECATED use `clean_resource_name` instead - Azure + managed image names can only contain certain characters and the maximum + length is 80. This function will replace illegal characters with a "-" + character. Example: `"mybuild-{{isotime | clean_image_name}}"` will become `mybuild-2017-10-18t02-06-30z`. @@ -96,9 +120,8 @@ Here is a full list of the available functions for reference. regex; it will not truncate your name if it exceeds 80 characters, and it will not validate that the beginning and end of the engine's output are valid. It will truncate invalid characters from the end of the name when - converting illegal characters. For example, - `"managed_image_name: "My-Name::"` will be converted to - `"managed_image_name: "My-Name"` + converting illegal characters. 
For example, `"managed_image_name: + "My-Name::"` will be converted to `"managed_image_name: "My-Name"` ## Template variables From 365b32eb9c8a29e36d8a76d2338550dfec37fe3d Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 09:43:39 -0700 Subject: [PATCH 18/47] goofing --- packer/build.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packer/build.go b/packer/build.go index d2725035d..af9a307f8 100644 --- a/packer/build.go +++ b/packer/build.go @@ -309,7 +309,8 @@ PostProcessorRunSeqLoop: } else { log.Printf("Deleting prior artifact from post-processor '%s'", corePP.processorType) if err := priorArtifact.Destroy(); err != nil { - errors = append(errors, fmt.Errorf("Failed cleaning up prior artifact: %s", err)) + log.Printf("Error is %#v", err) + errors = append(errors, fmt.Errorf("Failed cleaning up prior artifact: %s; pp is %s", err, corePP.processorType)) } } } From a358b174a4d236c3927277719ac8590a571507de Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 10:43:41 -0700 Subject: [PATCH 19/47] make sure we filter the ui as well as the logs --- packer/ui.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/packer/ui.go b/packer/ui.go index 0e803efd9..2ee866d2b 100644 --- a/packer/ui.go +++ b/packer/ui.go @@ -247,6 +247,13 @@ func (rw *BasicUi) Message(message string) { rw.l.Lock() defer rw.l.Unlock() + // Use LogSecretFilter to scrub out sensitive variables + for s := range LogSecretFilter.s { + if s != "" { + message = strings.Replace(message, s, "", -1) + } + } + log.Printf("ui: %s", message) _, err := fmt.Fprint(rw.Writer, message+"\n") if err != nil { @@ -263,6 +270,13 @@ func (rw *BasicUi) Error(message string) { writer = rw.Writer } + // Use LogSecretFilter to scrub out sensitive variables + for s := range LogSecretFilter.s { + if s != "" { + message = strings.Replace(message, s, "", -1) + } + } + log.Printf("ui error: %s", message) _, err := fmt.Fprint(writer, message+"\n") if err != nil { From 
c1960840e3528bd760b82d8dfbf737d7908b9358 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 11:01:52 -0700 Subject: [PATCH 20/47] fix tests --- packer/core_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packer/core_test.go b/packer/core_test.go index 1d6c87e4c..a2ad81cf4 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -626,6 +626,9 @@ func TestSensitiveVars(t *testing.T) { if filtered[0] != tc.Expected && len(filtered) != 1 { t.Fatalf("not filtering sensitive vars; filtered is %#v", filtered) } + + // clear filter so it doesn't break other tests + LogSecretFilter.s = make(map[string]struct{}) } } From 056fcb7ceaec7170035feefb2aea26b4ac79d967 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 11:32:49 -0700 Subject: [PATCH 21/47] fix tests and add a few new ones --- packer/build_test.go | 76 ++++++++++++++++--- packer/plugin/post_processor_test.go | 4 +- packer/rpc/post_processor_test.go | 6 +- .../checksum/post-processor_test.go | 2 +- .../compress/post-processor_test.go | 2 +- .../docker-push/post-processor_test.go | 15 +++- .../docker-tag/post-processor_test.go | 10 ++- post-processor/vagrant/post-processor_test.go | 4 +- template/parse_test.go | 6 +- 9 files changed, 101 insertions(+), 24 deletions(-) diff --git a/packer/build_test.go b/packer/build_test.go index 057de0127..5300a7fdd 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -5,6 +5,10 @@ import ( "testing" ) +func boolPointer(tf bool) *bool { + return &tf +} + func testBuild() *coreBuild { return &coreBuild{ name: "test", @@ -19,7 +23,7 @@ func testBuild() *coreBuild { }, postProcessors: [][]coreBuildPostProcessor{ { - {&MockPostProcessor{ArtifactId: "pp"}, "testPP", make(map[string]interface{}), true}, + {&MockPostProcessor{ArtifactId: "pp"}, "testPP", make(map[string]interface{}), boolPointer(true)}, }, }, variables: make(map[string]string), @@ -245,7 +249,7 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() 
build.postProcessors = [][]coreBuildPostProcessor{ { - {&MockPostProcessor{ArtifactId: "pp"}, "pp", make(map[string]interface{}), false}, + {&MockPostProcessor{ArtifactId: "pp"}, "pp", make(map[string]interface{}), boolPointer(false)}, }, } @@ -270,10 +274,10 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() build.postProcessors = [][]coreBuildPostProcessor{ { - {&MockPostProcessor{ArtifactId: "pp1"}, "pp", make(map[string]interface{}), false}, + {&MockPostProcessor{ArtifactId: "pp1"}, "pp", make(map[string]interface{}), boolPointer(false)}, }, { - {&MockPostProcessor{ArtifactId: "pp2"}, "pp", make(map[string]interface{}), true}, + {&MockPostProcessor{ArtifactId: "pp2"}, "pp", make(map[string]interface{}), boolPointer(true)}, }, } @@ -298,12 +302,12 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() build.postProcessors = [][]coreBuildPostProcessor{ { - {&MockPostProcessor{ArtifactId: "pp1a"}, "pp", make(map[string]interface{}), false}, - {&MockPostProcessor{ArtifactId: "pp1b"}, "pp", make(map[string]interface{}), true}, + {&MockPostProcessor{ArtifactId: "pp1a"}, "pp", make(map[string]interface{}), boolPointer(false)}, + {&MockPostProcessor{ArtifactId: "pp1b"}, "pp", make(map[string]interface{}), boolPointer(true)}, }, { - {&MockPostProcessor{ArtifactId: "pp2a"}, "pp", make(map[string]interface{}), false}, - {&MockPostProcessor{ArtifactId: "pp2b"}, "pp", make(map[string]interface{}), false}, + {&MockPostProcessor{ArtifactId: "pp2a"}, "pp", make(map[string]interface{}), boolPointer(false)}, + {&MockPostProcessor{ArtifactId: "pp2b"}, "pp", make(map[string]interface{}), boolPointer(false)}, }, } @@ -329,7 +333,61 @@ func TestBuild_Run_Artifacts(t *testing.T) { build.postProcessors = [][]coreBuildPostProcessor{ { { - &MockPostProcessor{ArtifactId: "pp", Keep: true}, "pp", make(map[string]interface{}), false, + &MockPostProcessor{ArtifactId: "pp", Keep: true, ForceOverride: true}, "pp", make(map[string]interface{}), boolPointer(false), 
+ }, + }, + } + + build.Prepare() + artifacts, err = build.Run(ui) + if err != nil { + t.Fatalf("err: %s", err) + } + + expectedIds = []string{"b", "pp"} + artifactIds = make([]string, len(artifacts)) + for i, artifact := range artifacts { + artifactIds[i] = artifact.Id() + } + + if !reflect.DeepEqual(artifactIds, expectedIds) { + t.Fatalf("unexpected ids: %#v", artifactIds) + } + + // Test case: Test that with a single post-processor that non-forcibly + // keeps inputs, that the artifacts are discarded if user overrides. + build = testBuild() + build.postProcessors = [][]coreBuildPostProcessor{ + { + { + &MockPostProcessor{ArtifactId: "pp", Keep: true, ForceOverride: false}, "pp", make(map[string]interface{}), boolPointer(false), + }, + }, + } + + build.Prepare() + artifacts, err = build.Run(ui) + if err != nil { + t.Fatalf("err: %s", err) + } + + expectedIds = []string{"pp"} + artifactIds = make([]string, len(artifacts)) + for i, artifact := range artifacts { + artifactIds[i] = artifact.Id() + } + + if !reflect.DeepEqual(artifactIds, expectedIds) { + t.Fatalf("unexpected ids: %#v", artifactIds) + } + + // Test case: Test that with a single post-processor that non-forcibly + // keeps inputs, that the artifacts are kept if user does not have preference. 
+ build = testBuild() + build.postProcessors = [][]coreBuildPostProcessor{ + { + { + &MockPostProcessor{ArtifactId: "pp", Keep: true, ForceOverride: false}, "pp", make(map[string]interface{}), nil, }, }, } diff --git a/packer/plugin/post_processor_test.go b/packer/plugin/post_processor_test.go index a1e2f0f65..b6276611c 100644 --- a/packer/plugin/post_processor_test.go +++ b/packer/plugin/post_processor_test.go @@ -13,8 +13,8 @@ func (helperPostProcessor) Configure(...interface{}) error { return nil } -func (helperPostProcessor) PostProcess(packer.Ui, packer.Artifact) (packer.Artifact, bool, error) { - return nil, false, nil +func (helperPostProcessor) PostProcess(packer.Ui, packer.Artifact) (packer.Artifact, bool, bool, error) { + return nil, false, false, nil } func TestPostProcessor_NoExist(t *testing.T) { diff --git a/packer/rpc/post_processor_test.go b/packer/rpc/post_processor_test.go index 683b6dc16..f7438bf00 100644 --- a/packer/rpc/post_processor_test.go +++ b/packer/rpc/post_processor_test.go @@ -24,12 +24,12 @@ func (pp *TestPostProcessor) Configure(v ...interface{}) error { return nil } -func (pp *TestPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, error) { +func (pp *TestPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, bool, error) { pp.ppCalled = true pp.ppArtifact = a pp.ppArtifactId = a.Id() pp.ppUi = ui - return testPostProcessorArtifact, false, nil + return testPostProcessorArtifact, false, false, nil } func TestPostProcessorRPC(t *testing.T) { @@ -65,7 +65,7 @@ func TestPostProcessorRPC(t *testing.T) { IdValue: "ppTestId", } ui := new(testUi) - artifact, _, err := ppClient.PostProcess(ui, a) + artifact, _, _, err := ppClient.PostProcess(ui, a) if err != nil { t.Fatalf("err: %s", err) } diff --git a/post-processor/checksum/post-processor_test.go b/post-processor/checksum/post-processor_test.go index f2b91aca9..55e147859 100644 --- 
a/post-processor/checksum/post-processor_test.go +++ b/post-processor/checksum/post-processor_test.go @@ -98,7 +98,7 @@ func testChecksum(t *testing.T, config string) packer.Artifact { checksum.config.PackerBuildName = "vanilla" checksum.config.PackerBuilderType = "file" - artifactOut, _, err := checksum.PostProcess(ui, artifact) + artifactOut, _, _, err := checksum.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to checksum artifact: %s", err) } diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index a3c6a8232..564964f65 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -238,7 +238,7 @@ func testArchive(t *testing.T, config string) packer.Artifact { compressor.config.PackerBuildName = "vanilla" compressor.config.PackerBuilderType = "file" - artifactOut, _, err := compressor.PostProcess(ui, artifact) + artifactOut, _, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to compress artifact: %s", err) } diff --git a/post-processor/docker-push/post-processor_test.go b/post-processor/docker-push/post-processor_test.go index 1eee102cb..9309554e1 100644 --- a/post-processor/docker-push/post-processor_test.go +++ b/post-processor/docker-push/post-processor_test.go @@ -41,13 +41,16 @@ func TestPostProcessor_PostProcess(t *testing.T) { IdValue: "foo/bar", } - result, keep, err := p.PostProcess(testUi(), artifact) + result, keep, forceOverride, err := p.PostProcess(testUi(), artifact) if _, ok := result.(packer.Artifact); !ok { t.Fatal("should be instance of Artifact") } if !keep { t.Fatal("should keep") } + if forceOverride { + t.Fatal("Should default to keep, but not override user wishes") + } if err != nil { t.Fatalf("err: %s", err) } @@ -71,13 +74,16 @@ func TestPostProcessor_PostProcess_portInName(t *testing.T) { IdValue: "localhost:5000/foo/bar", } - result, keep, err := p.PostProcess(testUi(), artifact) + 
result, keep, forceOverride, err := p.PostProcess(testUi(), artifact) if _, ok := result.(packer.Artifact); !ok { t.Fatal("should be instance of Artifact") } if !keep { t.Fatal("should keep") } + if forceOverride { + t.Fatal("Should default to keep, but not override user wishes") + } if err != nil { t.Fatalf("err: %s", err) } @@ -101,13 +107,16 @@ func TestPostProcessor_PostProcess_tags(t *testing.T) { IdValue: "hashicorp/ubuntu:precise", } - result, keep, err := p.PostProcess(testUi(), artifact) + result, keep, forceOverride, err := p.PostProcess(testUi(), artifact) if _, ok := result.(packer.Artifact); !ok { t.Fatal("should be instance of Artifact") } if !keep { t.Fatal("should keep") } + if forceOverride { + t.Fatal("Should default to keep, but not override user wishes") + } if err != nil { t.Fatalf("err: %s", err) } diff --git a/post-processor/docker-tag/post-processor_test.go b/post-processor/docker-tag/post-processor_test.go index fef434ee9..110c5568a 100644 --- a/post-processor/docker-tag/post-processor_test.go +++ b/post-processor/docker-tag/post-processor_test.go @@ -48,13 +48,16 @@ func TestPostProcessor_PostProcess(t *testing.T) { IdValue: "1234567890abcdef", } - result, keep, err := p.PostProcess(testUi(), artifact) + result, keep, forceOverride, err := p.PostProcess(testUi(), artifact) if _, ok := result.(packer.Artifact); !ok { t.Fatal("should be instance of Artifact") } if !keep { t.Fatal("should keep") } + if !forceOverride { + t.Fatal("Should force keep no matter what user sets.") + } if err != nil { t.Fatalf("err: %s", err) } @@ -87,13 +90,16 @@ func TestPostProcessor_PostProcess_Force(t *testing.T) { IdValue: "1234567890abcdef", } - result, keep, err := p.PostProcess(testUi(), artifact) + result, keep, forceOverride, err := p.PostProcess(testUi(), artifact) if _, ok := result.(packer.Artifact); !ok { t.Fatal("should be instance of Artifact") } if !keep { t.Fatal("should keep") } + if !forceOverride { + t.Fatal("Should force keep no matter what 
user sets.") + } if err != nil { t.Fatalf("err: %s", err) } diff --git a/post-processor/vagrant/post-processor_test.go b/post-processor/vagrant/post-processor_test.go index 8a5368737..749ed162a 100644 --- a/post-processor/vagrant/post-processor_test.go +++ b/post-processor/vagrant/post-processor_test.go @@ -151,7 +151,7 @@ func TestPostProcessorPostProcess_badId(t *testing.T) { BuilderIdValue: "invalid.packer", } - _, _, err := testPP(t).PostProcess(testUi(), artifact) + _, _, _, err := testPP(t).PostProcess(testUi(), artifact) if !strings.Contains(err.Error(), "artifact type") { t.Fatalf("err: %s", err) } @@ -181,7 +181,7 @@ func TestPostProcessorPostProcess_vagrantfileUserVariable(t *testing.T) { a := &packer.MockArtifact{ BuilderIdValue: "packer.parallels", } - a2, _, err := p.PostProcess(testUi(), a) + a2, _, _, err := p.PostProcess(testUi(), a) if a2 != nil { for _, fn := range a2.Files() { defer os.Remove(fn) diff --git a/template/parse_test.go b/template/parse_test.go index 08d7781f5..d67f31ec3 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -14,6 +14,10 @@ import ( "github.com/google/go-cmp/cmp" ) +func boolPointer(tf bool) *bool { + return &tf +} + func TestParse(t *testing.T) { cases := []struct { File string @@ -205,7 +209,7 @@ func TestParse(t *testing.T) { { Name: "foo", Type: "foo", - KeepInputArtifact: true, + KeepInputArtifact: boolPointer(true), }, }, }, From 1b77b05ce2e8d20236dc7cd0cfa9bad511fd34e2 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 12:03:40 -0700 Subject: [PATCH 22/47] remove redundant keep_input_artifact from compress pp and clarify keep behavior in shell-local pp --- post-processor/compress/post-processor.go | 20 +++++++++----------- post-processor/shell-local/post-processor.go | 12 ++++++++---- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 4d19d6bfd..a10156547 100644 --- 
a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -37,10 +37,9 @@ type Config struct { common.PackerConfig `mapstructure:",squash"` // Fields from config file - OutputPath string `mapstructure:"output"` - Format string `mapstructure:"format"` - CompressionLevel int `mapstructure:"compression_level"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` + OutputPath string `mapstructure:"output"` + Format string `mapstructure:"format"` + CompressionLevel int `mapstructure:"compression_level"` // Derived fields Archive string @@ -115,7 +114,6 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac fmt.Println(target) } - keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} if err = os.MkdirAll(filepath.Dir(target), os.FileMode(0755)); err != nil { @@ -168,19 +166,19 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression)) err = createTarArchive(artifact.Files(), output) if err != nil { - return nil, keep, false, fmt.Errorf("Error creating tar: %s", err) + return nil, false, false, fmt.Errorf("Error creating tar: %s", err) } case "zip": ui.Say(fmt.Sprintf("Zipping %s", target)) err = createZipArchive(artifact.Files(), output) if err != nil { - return nil, keep, false, fmt.Errorf("Error creating zip: %s", err) + return nil, false, false, fmt.Errorf("Error creating zip: %s", err) } default: // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { - return nil, keep, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Can only have 1 input file when not using tar/zip. 
Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } @@ -189,21 +187,21 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac source, err := os.Open(archiveFile) if err != nil { - return nil, keep, false, fmt.Errorf( + return nil, false, false, fmt.Errorf( "Failed to open source file %s for reading: %s", archiveFile, err) } defer source.Close() if _, err = io.Copy(output, source); err != nil { - return nil, keep, false, fmt.Errorf("Failed to compress %s: %s", + return nil, false, false, fmt.Errorf("Failed to compress %s: %s", archiveFile, err) } } ui.Say(fmt.Sprintf("Archive %s completed", target)) - return newArtifact, keep, false, nil + return newArtifact, false, false, nil } func (config *Config) detectFromFilename() { diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index a4b53f95b..e3cf89457 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -39,10 +39,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // this particular post-processor doesn't do anything with the artifact // except to return it. - retBool, retErr := sl.Run(ui, &p.config) - if !retBool { - return nil, retBool, false, retErr + success, retErr := sl.Run(ui, &p.config) + if !success { + return nil, false, false, retErr } - return artifact, retBool, false, retErr + // Force shell-local pp to keep the input artifact, because otherwise we'll + // lose it instead of being able to pass it through. If oyu want to delete + // the input artifact for a shell local pp, use the artifice pp to create a + // new artifact + return artifact, true, true, retErr } From 9dafa310f3c0199049e9a16d456f9064e9e52f5a Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 12:05:38 -0700 Subject: [PATCH 23/47] remove redundant keep_input_artifact code from googlecompute-export and googlecompute-import pps. 
The behavior coded here was already enforced by the core postprocessor code in packer/build.go --- .../googlecompute-export/post-processor.go | 25 +++++++++---------- .../googlecompute-import/post-processor.go | 9 +++---- 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/post-processor/googlecompute-export/post-processor.go b/post-processor/googlecompute-export/post-processor.go index 0d11b0c85..1ce45ea8e 100644 --- a/post-processor/googlecompute-export/post-processor.go +++ b/post-processor/googlecompute-export/post-processor.go @@ -17,14 +17,13 @@ type Config struct { AccountFile string `mapstructure:"account_file"` - DiskSizeGb int64 `mapstructure:"disk_size"` - DiskType string `mapstructure:"disk_type"` - KeepOriginalImage bool `mapstructure:"keep_input_artifact"` - MachineType string `mapstructure:"machine_type"` - Network string `mapstructure:"network"` - Paths []string `mapstructure:"paths"` - Subnetwork string `mapstructure:"subnetwork"` - Zone string `mapstructure:"zone"` + DiskSizeGb int64 `mapstructure:"disk_size"` + DiskType string `mapstructure:"disk_type"` + MachineType string `mapstructure:"machine_type"` + Network string `mapstructure:"network"` + Paths []string `mapstructure:"paths"` + Subnetwork string `mapstructure:"subnetwork"` + Zone string `mapstructure:"zone"` Account googlecompute.AccountFile ctx interpolate.Context @@ -80,7 +79,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac err := fmt.Errorf( "Unknown artifact type: %s\nCan only export from Google Compute Engine builder artifacts.", artifact.BuilderId()) - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } builderAccountFile := artifact.State("AccountFilePath").(string) @@ -98,13 +97,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac if builderAccountFile != "" { err := googlecompute.ProcessAccountFile(&p.config.Account, builderAccountFile) if err != nil { - return nil, 
p.config.KeepOriginalImage, false, err + return nil, false, false, err } } if p.config.AccountFile != "" { err := googlecompute.ProcessAccountFile(&p.config.Account, p.config.AccountFile) if err != nil { - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } } @@ -141,7 +140,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac driver, err := googlecompute.NewDriverGCE(ui, builderProjectId, &p.config.Account) if err != nil { - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } // Set up the state. @@ -169,5 +168,5 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac result := &Artifact{paths: p.config.Paths} - return result, p.config.KeepOriginalImage, false, nil + return result, false, false, nil } diff --git a/post-processor/googlecompute-import/post-processor.go b/post-processor/googlecompute-import/post-processor.go index 9ff930f1c..a211af39b 100644 --- a/post-processor/googlecompute-import/post-processor.go +++ b/post-processor/googlecompute-import/post-processor.go @@ -31,7 +31,6 @@ type Config struct { ImageGuestOsFeatures []string `mapstructure:"image_guest_os_features"` ImageLabels map[string]string `mapstructure:"image_labels"` ImageName string `mapstructure:"image_name"` - KeepOriginalImage bool `mapstructure:"keep_input_artifact"` SkipClean bool `mapstructure:"skip_clean"` Account googlecompute.AccountFile @@ -114,22 +113,22 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac rawImageGcsPath, err := UploadToBucket(client, ui, artifact, p.config.Bucket, p.config.GCSObjectName) if err != nil { - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } gceImageArtifact, err := CreateGceImage(client, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels, p.config.ImageGuestOsFeatures) if err != 
nil { - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } if !p.config.SkipClean { err = DeleteFromBucket(client, ui, p.config.Bucket, p.config.GCSObjectName) if err != nil { - return nil, p.config.KeepOriginalImage, false, err + return nil, false, false, err } } - return gceImageArtifact, p.config.KeepOriginalImage, false, nil + return gceImageArtifact, false, false, nil } func UploadToBucket(client *http.Client, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) { From 10f47b5158c13f677aba9bcade56420108acb454 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 13:57:22 -0700 Subject: [PATCH 24/47] document clearly what keep_input_artifact does for each post-processor --- packer/build.go | 2 +- .../source/docs/post-processors/alicloud-import.html.md | 3 +++ .../source/docs/post-processors/amazon-import.html.md | 3 +++ website/source/docs/post-processors/artifice.html.md | 5 +++++ website/source/docs/post-processors/checksum.html.md | 5 +++++ website/source/docs/post-processors/compress.html.md | 4 +++- .../docs/post-processors/digitalocean-import.html.md | 3 +++ .../source/docs/post-processors/docker-import.html.md | 3 +++ website/source/docs/post-processors/docker-push.html.md | 4 ++++ website/source/docs/post-processors/docker-save.html.md | 7 +++++++ website/source/docs/post-processors/docker-tag.html.md | 7 +++++++ .../docs/post-processors/googlecompute-export.html.md | 4 ++-- website/source/docs/post-processors/manifest.html.md | 7 +++++++ website/source/docs/post-processors/shell-local.html.md | 9 +++++++++ .../source/docs/post-processors/vagrant-cloud.html.md | 3 +++ website/source/docs/post-processors/vagrant.html.md | 8 ++++++-- .../source/docs/post-processors/vsphere-template.html.md | 6 ++++++ website/source/docs/post-processors/vsphere.html.md | 3 +++ 18 files changed, 80 insertions(+), 6 deletions(-) diff --git a/packer/build.go b/packer/build.go index 
af9a307f8..a8c624bbc 100644 --- a/packer/build.go +++ b/packer/build.go @@ -281,7 +281,7 @@ PostProcessorRunSeqLoop: // useless if keep isn't set, force an override that still uses // post-processor preference instead of user preference. if corePP.keepInputArtifact != nil { - if *corePP.keepInputArtifact == false && forceOverride { + if defaultKeep && *corePP.keepInputArtifact == false && forceOverride { log.Printf("The %s post-processor forces "+ "keep_input_artifact=true to preserve integrity of the"+ "build chain. User-set keep_input_artifact=false will be"+ diff --git a/website/source/docs/post-processors/alicloud-import.html.md b/website/source/docs/post-processors/alicloud-import.html.md index 7f26c42d7..fb28f6163 100644 --- a/website/source/docs/post-processors/alicloud-import.html.md +++ b/website/source/docs/post-processors/alicloud-import.html.md @@ -60,6 +60,9 @@ are two categories: required and optional parameters. ### Optional: +- `keep_input_artifact` (boolean) - if true, do not delete the RAW or VHD + disk image after importing it to the cloud. Defaults to false. + - `oss_key_name` (string) - The name of the object key in `oss_bucket_name` where the RAW or VHD file will be copied to for import. diff --git a/website/source/docs/post-processors/amazon-import.html.md b/website/source/docs/post-processors/amazon-import.html.md index 95bb10958..21f24eeea 100644 --- a/website/source/docs/post-processors/amazon-import.html.md +++ b/website/source/docs/post-processors/amazon-import.html.md @@ -101,6 +101,9 @@ Optional: - `insecure_skip_tls_verify` (boolean) - This allows skipping TLS verification of the AWS EC2 endpoint. The default is `false`. +- `keep_input_artifact` (boolean) - if true, do not delete the source virtual + machine image after importing it to the cloud. Defaults to false. + - `license_type` (string) - The license type to be used for the Amazon Machine Image (AMI) after importing. Valid values: `AWS` or `BYOL` (default). 
For more details regarding licensing, see diff --git a/website/source/docs/post-processors/artifice.html.md b/website/source/docs/post-processors/artifice.html.md index e7b287315..e6bd8e89d 100644 --- a/website/source/docs/post-processors/artifice.html.md +++ b/website/source/docs/post-processors/artifice.html.md @@ -59,6 +59,11 @@ The configuration allows you to specify which files comprise your artifact. packer is complete. These will replace any of the builder's original artifacts (such as a VM snapshot). +### Optional: + +- `keep_input_artifact` (boolean) - if true, do not delete the original + artifact files after creating your new artifact. Defaults to true. + ### Example Configuration This minimal example: diff --git a/website/source/docs/post-processors/checksum.html.md b/website/source/docs/post-processors/checksum.html.md index fa66cb153..e95a280d9 100644 --- a/website/source/docs/post-processors/checksum.html.md +++ b/website/source/docs/post-processors/checksum.html.md @@ -43,6 +43,11 @@ Optional parameters: - `checksum_types` (array of strings) - An array of strings of checksum types to compute. Allowed values are md5, sha1, sha224, sha256, sha384, sha512. + +- `keep_input_artifact` (boolean) - Unlike most post-processors, setting + `keep_input_artifact` will have no effect; the checksum post-processor + always saves the artifact that it is calculating the checksum for. + - `output` (string) - Specify filename to store checksums. This defaults to `packer_{{.BuildName}}_{{.BuilderType}}_{{.ChecksumType}}.checksum`. For example, if you had a builder named `database`, you might see the file diff --git a/website/source/docs/post-processors/compress.html.md b/website/source/docs/post-processors/compress.html.md index c4456f5de..f908612ae 100644 --- a/website/source/docs/post-processors/compress.html.md +++ b/website/source/docs/post-processors/compress.html.md @@ -38,7 +38,9 @@ you will need to specify the `output` option. 
algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Defaults to `6` -- `keep_input_artifact` (boolean) - Keep source files; defaults to `false` +- `keep_input_artifact` (boolean) - if `true`, keep both the source files and + the compressed file; if `false`, discard the source files. Defaults to + `false` ### Supported Formats diff --git a/website/source/docs/post-processors/digitalocean-import.html.md b/website/source/docs/post-processors/digitalocean-import.html.md index 3120b2acb..48a757a76 100644 --- a/website/source/docs/post-processors/digitalocean-import.html.md +++ b/website/source/docs/post-processors/digitalocean-import.html.md @@ -67,6 +67,9 @@ Optional: - `image_tags` (array of strings) - A list of tags to apply to the resulting imported image. +- `keep_input_artifact` (boolean) - if true, do not delete the source virtual + machine image after importing it to the cloud. Defaults to false. + - `skip_clean` (boolean) - Whether we should skip removing the image file uploaded to Spaces after the import process has completed. "true" means that we should leave it in the Space, "false" means to clean it out. diff --git a/website/source/docs/post-processors/docker-import.html.md b/website/source/docs/post-processors/docker-import.html.md index 4fb3b0fe3..63665d38a 100644 --- a/website/source/docs/post-processors/docker-import.html.md +++ b/website/source/docs/post-processors/docker-import.html.md @@ -38,6 +38,9 @@ is optional. commit. Example of instructions are `CMD`, `ENTRYPOINT`, `ENV`, and `EXPOSE`. Example: `[ "USER ubuntu", "WORKDIR /app", "EXPOSE 8080" ]` +- `keep_input_artifact` (boolean) - if true, do not delete the source tar + after importing it to docker. Defaults to false. 
+ ## Example An example is shown below, showing only the post-processor configuration: diff --git a/website/source/docs/post-processors/docker-push.html.md b/website/source/docs/post-processors/docker-push.html.md index a63ce9aad..5c626b513 100644 --- a/website/source/docs/post-processors/docker-push.html.md +++ b/website/source/docs/post-processors/docker-push.html.md @@ -42,6 +42,10 @@ This post-processor has only optional configuration: the duration of the push. If true `login_server` is required and `login`, `login_username`, and `login_password` will be ignored. +- `keep_input_artifact` (boolean) - if true, do not delete the docker image + after pushing it to the cloud. Defaults to true, but can be set to false if + you do not need to save your local copy of the docker container. + - `login` (boolean) - Defaults to false. If true, the post-processor will login prior to pushing. For log into ECR see `ecr_login`. diff --git a/website/source/docs/post-processors/docker-save.html.md b/website/source/docs/post-processors/docker-save.html.md index 2fb4bc572..4063d335e 100644 --- a/website/source/docs/post-processors/docker-save.html.md +++ b/website/source/docs/post-processors/docker-save.html.md @@ -24,10 +24,17 @@ familiar with this and vice versa. ## Configuration +### Required + The configuration for this post-processor only requires one option. - `path` (string) - The path to save the image. +### Optional + +- `keep_input_artifact` (boolean) - if true, do not delete the docker + container, and only save the .tar created by docker save. Defaults to true. + ## Example An example is shown below, showing only the post-processor configuration: diff --git a/website/source/docs/post-processors/docker-tag.html.md b/website/source/docs/post-processors/docker-tag.html.md index 3720bff66..07d441abe 100644 --- a/website/source/docs/post-processors/docker-tag.html.md +++ b/website/source/docs/post-processors/docker-tag.html.md @@ -38,6 +38,13 @@ settings are optional. 
after 1.12.0. [reference](https://docs.docker.com/engine/deprecated/#/f-flag-on-docker-tag) +- `keep_input_artifact` (boolean) - Unlike most other post-processors, the + keep_input_artifact option will have no effect for the docker-tag + post-processor. We will always retain the input artifact for docker-tag, + since deleting the image we just tagged is not a behavior anyone should ever + expect. `keep_input_artifact` will therefore always be evaluated as true, + regardless of the value you enter into this field. + ## Example An example is shown below, showing only the post-processor configuration: diff --git a/website/source/docs/post-processors/googlecompute-export.html.md b/website/source/docs/post-processors/googlecompute-export.html.md index 0484b0c78..79c768a31 100644 --- a/website/source/docs/post-processors/googlecompute-export.html.md +++ b/website/source/docs/post-processors/googlecompute-export.html.md @@ -45,8 +45,8 @@ permissions to the GCS `paths`. - `disk_type` (string) - Type of disk used to back export instance, like `pd-ssd` or `pd-standard`. Defaults to `pd-ssd`. -- `keep_input_artifact` (boolean) - If true, do not delete the Google Compute - Engine (GCE) image being exported. +- `keep_input_artifact` (boolean) - If `true`, do not delete the Google Compute + Engine (GCE) image being exported. Defaults to `false`. - `machine_type` (string) - The export instance machine type. Defaults to `"n1-highcpu-4"`. diff --git a/website/source/docs/post-processors/manifest.html.md b/website/source/docs/post-processors/manifest.html.md index 3bb9bef3b..ace26d528 100644 --- a/website/source/docs/post-processors/manifest.html.md +++ b/website/source/docs/post-processors/manifest.html.md @@ -40,6 +40,13 @@ post-processors such as Docker and Artifice. file. This defaults to false. - `custom_data` (map of strings) Arbitrary data to add to the manifest. 
+- `keep_input_artifact` (boolean) - Unlike most other post-processors, the + keep_input_artifact option will have no effect for the manifest + post-processor. We will always retain the input artifact for manifest, + since deleting the files we just recorded is not a behavior anyone should + ever expect. `keep_input_artifact` will therefore always be evaluated as + true, regardless of the value you enter into this field. + ### Example Configuration You can simply add `{"type":"manifest"}` to your post-processor section. Below diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index 7599febf2..0d7c1f4ee 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -101,6 +101,15 @@ Optional parameters: like the `-e` flag, otherwise individual steps failing won't fail the provisioner. +- `keep_input_artifact` (boolean) - Unlike most other post-processors, the + keep_input_artifact option will have no effect for the shell-local + post-processor. Packer will always retain the input artifact for + shell-local, since the shell-local post-processor merely passes forward the + artifact it receives. If your shell-local post-processor produces a file or + files which you would like to have replace the input artifact, you may + overwrite the input artifact using the [artifice](./artifice.html) + post-processor after your shell-local processor has run. + - `only_on` (array of strings) - This is an array of [runtime operating systems](https://golang.org/doc/install/source#environment) where `shell-local` will execute. 
This allows you to execute `shell-local` *only* diff --git a/website/source/docs/post-processors/vagrant-cloud.html.md b/website/source/docs/post-processors/vagrant-cloud.html.md index 2accdfb0b..1edac0f8a 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.md +++ b/website/source/docs/post-processors/vagrant-cloud.html.md @@ -84,6 +84,9 @@ on Vagrant Cloud, as well as authentication and version information. to set this option to true if your host at vagrant_cloud_url is using a self-signed certificate. +- `keep_input_artifact` (boolean) - When true, preserve the local box + after uploading to Vagrant cloud. Defaults to `true`. + - `version_description` (string) - Optionally markdown text used as a full-length and in-depth description of the version, typically for denoting changes introduced diff --git a/website/source/docs/post-processors/vagrant.html.md b/website/source/docs/post-processors/vagrant.html.md index 253472ed3..62cd0ce0f 100644 --- a/website/source/docs/post-processors/vagrant.html.md +++ b/website/source/docs/post-processors/vagrant.html.md @@ -65,8 +65,12 @@ more details about certain options in following sections. Vagrant box (regardless of their paths). They can then be used from the Vagrantfile. -- `keep_input_artifact` (boolean) - If set to true, do not delete the - `output_directory` on a successful build. Defaults to false. +- `keep_input_artifact` (boolean) - When true, preserve the artifact we use to + create the vagrant box. Defaults to `false`, except when you set a cloud + provider (e.g. aws, azure, google, digitalocean). In these cases deleting + the input artifact would render the vagrant box useless, so we always keep + these artifacts -- even if you specifically set + `"keep_input_artifact":false` - `output` (string) - The full path to the box file that will be created by this post-processor. 
This is a [configuration diff --git a/website/source/docs/post-processors/vsphere-template.html.md b/website/source/docs/post-processors/vsphere-template.html.md index a3ef0e34e..319eeb75e 100644 --- a/website/source/docs/post-processors/vsphere-template.html.md +++ b/website/source/docs/post-processors/vsphere-template.html.md @@ -61,6 +61,12 @@ Optional: - `insecure` (boolean) - If it's true skip verification of server certificate. Default is false +- `keep_input_artifact` (boolean) - Unlike most post-processors, this option + has no effect for vsphere-template. This is because in order for a template + to work, you can't delete the vm that you generate the template from. The + vsphere template post-processor will therefore always preserve the original + vm. + - `snapshot_enable` (boolean) - Create a snapshot before marking as a template. Default is false diff --git a/website/source/docs/post-processors/vsphere.html.md b/website/source/docs/post-processors/vsphere.html.md index a6646e2f2..8976e93cc 100644 --- a/website/source/docs/post-processors/vsphere.html.md +++ b/website/source/docs/post-processors/vsphere.html.md @@ -54,6 +54,9 @@ Optional: - `insecure` (boolean) - Whether or not the connection to vSphere can be done over an insecure connection. By default this is false. +- `keep_input_artifact` (boolean) - When `true`, preserve the local VM files, + even after importing them to vsphere. Defaults to `false`. + - `resource_pool` (string) - The resource pool to upload the VM to. - `vm_folder` (string) - The folder within the datastore to store the VM. 
From 96c94d2fa0953707891370668fe7b4ccc829cee3 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 3 Apr 2019 14:04:03 -0700 Subject: [PATCH 25/47] clean up code comments --- packer/build.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packer/build.go b/packer/build.go index a8c624bbc..ee5274e57 100644 --- a/packer/build.go +++ b/packer/build.go @@ -275,11 +275,13 @@ PostProcessorRunSeqLoop: } keep := defaultKeep - // When nil, go for the default. If overridden by user, use that - // instead. + // When user has not set keep_input_artifact, + // corePP.keepInputArtifact is nil. + // In this case, use the defaultKeep provided by the postprocessor. + // When user _has_ set keep_input_artifact, go with that instead. // Exception: for postprocessors that will fail/become - // useless if keep isn't set, force an override that still uses - // post-processor preference instead of user preference. + // useless if keep isn't true, heed forceOverride and keep the + // input artifact regardless of user preference. if corePP.keepInputArtifact != nil { if defaultKeep && *corePP.keepInputArtifact == false && forceOverride { log.Printf("The %s post-processor forces "+ @@ -287,7 +289,7 @@ PostProcessorRunSeqLoop: "build chain. User-set keep_input_artifact=false will be"+ "ignored.", corePP.processorType) } else { - // User overrides default + // User overrides default. 
keep = *corePP.keepInputArtifact } } From bab431724d2d12053e79a491d6c6ca04e8715a03 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 3 Apr 2019 16:39:27 -0700 Subject: [PATCH 26/47] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b46f16560..6da4b9d1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ * builder/amazon-chroot: Fix building PV images and where mount_partition is set [GH-7337] * builder/amazon: Fix http_proxy env var regression [GH-7361] +* builder/azure: Fix: Power off before taking snapshot (windows) [GH-7464] * builder/hcloud: Fix usage of freebsd64 rescue image [GH-7381] * builder/vagrant: windows : fix docs and usage [GH-7416] [GH-7417] * builder/vmware-esxi: properly copy .vmxf files in remote vmx builds [GH-7357] From 79143ade8f8571f9ac1ae864bfbae311d7dd6e59 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 4 Apr 2019 14:56:53 -0700 Subject: [PATCH 27/47] building on go master is just wasting our time. --- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d2def5ca5..023ef7b6b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,6 @@ language: go go: - 1.12.x - - master before_install: - > @@ -30,6 +29,5 @@ branches: matrix: allow_failures: - - go: master - os: windows fast_finish: true From 217a93fc3041df38adf68a56e1558156909994cb Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 4 Apr 2019 13:22:07 -0700 Subject: [PATCH 28/47] fix build_test.go on windows. This code needs to not be run in parallel, or else there can be race conditions with the post-processors all trying to access the same file at once. Additionally, I changed the test template itself so that it is valid code on either bash or cmd. Finally, I found and fixed a small bug in naming the file extensions in shell local inline scripts. 
--- command/build_test.go | 6 +++++- command/test-fixtures/build-only/template.json | 10 +++++----- common/shell-local/run.go | 4 +++- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/command/build_test.go b/command/build_test.go index 2a2ba1734..50f3db119 100644 --- a/command/build_test.go +++ b/command/build_test.go @@ -17,6 +17,7 @@ func TestBuildOnlyFileCommaFlags(t *testing.T) { } args := []string{ + "-parallel=false", "-only=chocolate,vanilla", filepath.Join(testFixture("build-only"), "template.json"), } @@ -58,7 +59,7 @@ func TestBuildStdin(t *testing.T) { defer func() { os.Stdin = stdin }() defer cleanup() - if code := c.Run([]string{"-"}); code != 0 { + if code := c.Run([]string{"-parallel=false", "-"}); code != 0 { fatalCommand(t, c.Meta) } @@ -76,6 +77,7 @@ func TestBuildOnlyFileMultipleFlags(t *testing.T) { } args := []string{ + "-parallel=false", "-only=chocolate", "-only=cherry", "-only=apple", // ignored @@ -109,6 +111,7 @@ func TestBuildEverything(t *testing.T) { } args := []string{ + "-parallel=false", `-except=`, filepath.Join(testFixture("build-only"), "template.json"), } @@ -133,6 +136,7 @@ func TestBuildExceptFileCommaFlags(t *testing.T) { } args := []string{ + "-parallel=false", "-except=chocolate,vanilla", filepath.Join(testFixture("build-only"), "template.json"), } diff --git a/command/test-fixtures/build-only/template.json b/command/test-fixtures/build-only/template.json index 01e6bfd13..d00f4e516 100644 --- a/command/test-fixtures/build-only/template.json +++ b/command/test-fixtures/build-only/template.json @@ -24,19 +24,19 @@ { "name": "apple", "type": "shell-local", - "inline": [ "touch apple.txt" ] + "inline": [ "echo apple > apple.txt" ] }, { "name": "peach", "type": "shell-local", - "inline": [ "touch peach.txt" ] + "inline": [ "echo peach > peach.txt" ] } ], [ { "name": "pear", "type": "shell-local", - "inline": [ "touch pear.txt" ] + "inline": [ "echo pear > pear.txt" ] } ], [ @@ -46,7 +46,7 @@ ], "name": "tomato", 
"type": "shell-local", - "inline": [ "touch tomato.txt" ] + "inline": [ "echo tomato > tomato.txt" ] } ], [ @@ -55,7 +55,7 @@ "chocolate" ], "type": "shell-local", - "inline": [ "touch unnamed.txt" ] + "inline": [ "echo unnamed > unnamed.txt" ] } ] ] diff --git a/common/shell-local/run.go b/common/shell-local/run.go index e1706f86d..8bb5e0246 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -54,13 +54,15 @@ func Run(ui packer.Ui, config *Config) (bool, error) { if err != nil { return false, err } - scripts = append(scripts, tempScriptFileName) // figure out what extension the file should have, and rename it. if config.TempfileExtension != "" { os.Rename(tempScriptFileName, fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension)) tempScriptFileName = fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension) } + + scripts = append(scripts, tempScriptFileName) + defer os.Remove(tempScriptFileName) } From 9f8fc37fde9e9f61656caa27b8e25b75343d46bb Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Sun, 10 Mar 2019 14:39:47 +0100 Subject: [PATCH 29/47] Implement Proxmox builder --- builder/proxmox/artifact.go | 44 ++++ builder/proxmox/bootcommand_driver.go | 123 +++++++++++ builder/proxmox/builder.go | 129 ++++++++++++ builder/proxmox/config.go | 196 ++++++++++++++++++ builder/proxmox/step_convert_to_template.go | 46 ++++ .../proxmox/step_finalize_template_config.go | 65 ++++++ builder/proxmox/step_start_vm.go | 143 +++++++++++++ builder/proxmox/step_success.go | 22 ++ builder/proxmox/step_type_boot_command.go | 110 ++++++++++ command/plugin.go | 2 + 10 files changed, 880 insertions(+) create mode 100644 builder/proxmox/artifact.go create mode 100644 builder/proxmox/bootcommand_driver.go create mode 100644 builder/proxmox/builder.go create mode 100644 builder/proxmox/config.go create mode 100644 builder/proxmox/step_convert_to_template.go create mode 100644 builder/proxmox/step_finalize_template_config.go create mode 100644 
builder/proxmox/step_start_vm.go create mode 100644 builder/proxmox/step_success.go create mode 100644 builder/proxmox/step_type_boot_command.go diff --git a/builder/proxmox/artifact.go b/builder/proxmox/artifact.go new file mode 100644 index 000000000..99d8e1a20 --- /dev/null +++ b/builder/proxmox/artifact.go @@ -0,0 +1,44 @@ +package proxmox + +import ( + "fmt" + "log" + "strconv" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/packer" +) + +type Artifact struct { + templateID int + proxmoxClient *proxmox.Client +} + +// Artifact implements packer.Artifact +var _ packer.Artifact = &Artifact{} + +func (*Artifact) BuilderId() string { + return BuilderId +} + +func (*Artifact) Files() []string { + return nil +} + +func (a *Artifact) Id() string { + return strconv.Itoa(a.templateID) +} + +func (a *Artifact) String() string { + return fmt.Sprintf("A template was created: %d", a.templateID) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + log.Printf("Destroying template: %d", a.templateID) + _, err := a.proxmoxClient.DeleteVm(proxmox.NewVmRef(a.templateID)) + return err +} diff --git a/builder/proxmox/bootcommand_driver.go b/builder/proxmox/bootcommand_driver.go new file mode 100644 index 000000000..b8debaafb --- /dev/null +++ b/builder/proxmox/bootcommand_driver.go @@ -0,0 +1,123 @@ +package proxmox + +import ( + "fmt" + "strings" + "time" + "unicode" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/common/bootcommand" +) + +type proxmoxDriver struct { + client *proxmox.Client + vmRef *proxmox.VmRef + specialMap map[string]string + runeMap map[rune]string + interval time.Duration +} + +func NewProxmoxDriver(c *proxmox.Client, vmRef *proxmox.VmRef, interval time.Duration) *proxmoxDriver { + // Mappings for packer shorthand to qemu qkeycodes + sMap := map[string]string{ + "spacebar": "spc", + "bs": "backspace", + "del": "delete", + "return": 
"ret", + "enter": "ret", + "pageUp": "pgup", + "pageDown": "pgdn", + } + // Mappings for runes that need to be translated to special qkeycodes + // Taken from https://github.com/qemu/qemu/blob/master/pc-bios/keymaps/en-us + rMap := map[rune]string{ + // Clean mappings + ' ': "spc", + '.': "dot", + ',': "comma", + ';': "semicolon", + '*': "asterisk", + '-': "minus", + '[': "bracket_left", + ']': "bracket_right", + '=': "equal", + '\'': "apostrophe", + '`': "grave_accent", + '/': "slash", + '\\': "backslash", + + '!': "shift-1", // "exclam" + '@': "shift-2", // "at" + '#': "shift-3", // "numbersign" + '$': "shift-4", // "dollar" + '%': "shift-5", // "percent" + '^': "shift-6", // "asciicircum" + '&': "shift-7", // "ampersand" + '(': "shift-9", // "parenleft" + ')': "shift-0", // "parenright" + '{': "shift-bracket_left", // "braceleft" + '}': "shift-bracket_right", // "braceright" + '"': "shift-apostrophe", // "quotedbl" + '+': "shift-equal", // "plus" + '_': "shift-minus", // "underscore" + ':': "shift-semicolon", // "colon" + '<': "shift-comma", // "less" is recognized, but seem to map to '/'? + '>': "shift-dot", // "greater" + '~': "shift-grave_accent", // "asciitilde" + '?': "shift-slash", // "question" + '|': "shift-backslash", // "bar" + } + + return &proxmoxDriver{ + client: c, + vmRef: vmRef, + specialMap: sMap, + runeMap: rMap, + interval: interval, + } +} + +func (p *proxmoxDriver) SendKey(key rune, action bootcommand.KeyAction) error { + if special, ok := p.runeMap[key]; ok { + return p.send(special) + } + + const shiftFormat = "shift-%c" + const shiftedChars = "~!@#$%^&*()_+{}|:\"<>?" 
// Copied from bootcommand/driver.go + + keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key) + + var keys string + if keyShift { + keys = fmt.Sprintf(shiftFormat, key) + } else { + keys = fmt.Sprintf("%c", key) + } + + return p.send(keys) +} + +func (p *proxmoxDriver) SendSpecial(special string, action bootcommand.KeyAction) error { + keys := special + if replacement, ok := p.specialMap[special]; ok { + keys = replacement + } + + return p.send(keys) +} + +func (p *proxmoxDriver) send(keys string) error { + res, err := p.client.MonitorCmd(p.vmRef, "sendkey "+keys) + if err != nil { + return err + } + if data, ok := res["data"].(string); ok && len(data) > 0 { + return fmt.Errorf("failed to send keys: %s", data) + } + + time.Sleep(p.interval) + return nil +} + +func (p *proxmoxDriver) Flush() error { return nil } diff --git a/builder/proxmox/builder.go b/builder/proxmox/builder.go new file mode 100644 index 000000000..f44bcc7a3 --- /dev/null +++ b/builder/proxmox/builder.go @@ -0,0 +1,129 @@ +package proxmox + +import ( + "crypto/tls" + "fmt" + "log" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +// The unique id for the builder +const BuilderId = "proxmox.builder" + +type Builder struct { + config Config + runner multistep.Runner + proxmoxClient *proxmox.Client +} + +// Builder implements packer.Builder +var _ packer.Builder = &Builder{} + +var pluginVersion = "1.0.0" + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + config, warnings, errs := NewConfig(raws...) 
+ if errs != nil { + return warnings, errs + } + b.config = *config + return nil, nil +} + +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + var err error + tlsConfig := &tls.Config{ + InsecureSkipVerify: b.config.SkipCertValidation, + } + b.proxmoxClient, err = proxmox.NewClient(b.config.ProxmoxURL.String(), nil, tlsConfig) + if err != nil { + return nil, err + } + + err = b.proxmoxClient.Login(b.config.Username, b.config.Password) + if err != nil { + return nil, err + } + + // Set up the state + state := new(multistep.BasicStateBag) + state.Put("config", &b.config) + state.Put("proxmoxClient", b.proxmoxClient) + state.Put("hook", hook) + state.Put("ui", ui) + + // Build the steps + steps := []multistep.Step{ + &stepStartVM{}, + &common.StepHTTPServer{ + HTTPDir: b.config.HTTPDir, + HTTPPortMin: b.config.HTTPPortMin, + HTTPPortMax: b.config.HTTPPortMax, + }, + &stepTypeBootCommand{ + BootConfig: b.config.BootConfig, + Ctx: b.config.ctx, + }, + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: getVMIP, + SSHConfig: b.config.Comm.SSHConfigFunc(), + }, + &common.StepProvision{}, + &common.StepCleanupTempKeys{ + Comm: &b.config.Comm, + }, + &stepConvertToTemplate{}, + &stepFinalizeTemplateConfig{}, + &stepSuccess{}, + } + // Run the steps + b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) + b.runner.Run(state) + // If there was an error, return that + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + artifact := &Artifact{ + templateID: state.Get("template_id").(int), + proxmoxClient: b.proxmoxClient, + } + + return artifact, nil +} + +func (b *Builder) Cancel() { + if b.runner != nil { + log.Println("Cancelling the step runner...") + b.runner.Cancel() + } +} + +func getVMIP(state multistep.StateBag) (string, error) { + c := state.Get("proxmoxClient").(*proxmox.Client) + vmRef := state.Get("vmRef").(*proxmox.VmRef) + + ifs, err := 
c.GetVmAgentNetworkInterfaces(vmRef) + if err != nil { + return "", err + } + + // TODO: Do something smarter here? Allow specifying interface? Or address family? + // For now, just go for first non-loopback + for _, iface := range ifs { + for _, addr := range iface.IPAddresses { + if addr.IsLoopback() { + continue + } + return addr.String(), nil + } + } + + return "", fmt.Errorf("Found no IP addresses on VM") +} diff --git a/builder/proxmox/config.go b/builder/proxmox/config.go new file mode 100644 index 000000000..e35c408f3 --- /dev/null +++ b/builder/proxmox/config.go @@ -0,0 +1,196 @@ +package proxmox + +import ( + "errors" + "fmt" + "log" + "net/url" + "os" + "time" + + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/common/bootcommand" + "github.com/hashicorp/packer/common/uuid" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" + "github.com/mitchellh/mapstructure" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + common.HTTPConfig `mapstructure:",squash"` + bootcommand.BootConfig `mapstructure:",squash"` + RawBootKeyInterval string `mapstructure:"boot_key_interval"` + BootKeyInterval time.Duration `` + Comm communicator.Config `mapstructure:",squash"` + + ProxmoxURLRaw string `mapstructure:"proxmox_url"` + ProxmoxURL *url.URL + SkipCertValidation bool `mapstructure:"insecure_skip_tls_verify"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + Node string `mapstructure:"node"` + + VMName string `mapstructure:"vm_name"` + VMID int `mapstructure:"vm_id"` + + Memory int `mapstructure:"memory"` + Cores int `mapstructure:"cores"` + Sockets int `mapstructure:"sockets"` + OS string `mapstructure:"os"` + NICs []nicConfig `mapstructure:"network_adapters"` + Disks []diskConfig `mapstructure:"disks"` + ISOFile string `mapstructure:"iso_file"` + + 
TemplateName string `mapstructure:"template_name"` + TemplateDescription string `mapstructure:"template_description"` + UnmountISO bool `mapstructure:"unmount_iso"` + + ctx interpolate.Context +} + +type nicConfig struct { + Model string `mapstructure:"model"` + MACAddress string `mapstructure:"mac_address"` + Bridge string `mapstructure:"bridge"` + VLANTag string `mapstructure:"vlan_tag"` +} +type diskConfig struct { + Type string `mapstructure:"type"` + StoragePool string `mapstructure:"storage_pool"` + StoragePoolType string `mapstructure:"storage_pool_type"` + Size string `mapstructure:"size"` + CacheMode string `mapstructure:"cache_mode"` + DiskFormat string `mapstructure:"format"` +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + + var md mapstructure.Metadata + err := config.Decode(c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + }, + }, + }, raws...) 
+ if err != nil { + return nil, nil, err + } + + var errs *packer.MultiError + + // Defaults + if c.ProxmoxURLRaw == "" { + c.ProxmoxURLRaw = os.Getenv("PROXMOX_URL") + } + if c.Username == "" { + c.Username = os.Getenv("PROXMOX_USERNAME") + } + if c.Password == "" { + c.Password = os.Getenv("PROXMOX_PASSWORD") + } + if c.RawBootKeyInterval == "" { + c.RawBootKeyInterval = os.Getenv(common.PackerKeyEnv) + } + if c.RawBootKeyInterval == "" { + c.BootKeyInterval = common.PackerKeyDefault + } else { + if interval, err := time.ParseDuration(c.RawBootKeyInterval); err == nil { + c.BootKeyInterval = interval + } else { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("Could not parse boot_key_interval: %v", err)) + } + } + + if c.VMName == "" { + // Default to packer-[time-ordered-uuid] + c.VMName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) + } + if c.Memory < 16 { + log.Printf("Memory %d is too small, using default: 512", c.Memory) + c.Memory = 512 + } + if c.Cores < 1 { + log.Printf("Number of cores %d is too small, using default: 1", c.Cores) + c.Cores = 1 + } + if c.Sockets < 1 { + log.Printf("Number of sockets %d is too small, using default: 1", c.Sockets) + c.Sockets = 1 + } + if c.OS == "" { + log.Printf("OS not set, using default 'other'") + c.OS = "other" + } + for idx := range c.NICs { + if c.NICs[idx].Model == "" { + log.Printf("NIC %d model not set, using default 'e1000'", idx) + c.NICs[idx].Model = "e1000" + } + } + for idx := range c.Disks { + if c.Disks[idx].Type == "" { + log.Printf("Disk %d type not set, using default 'scsi'", idx) + c.Disks[idx].Type = "scsi" + } + if c.Disks[idx].Size == "" { + log.Printf("Disk %d size not set, using default '20G'", idx) + c.Disks[idx].Size = "20G" + } + if c.Disks[idx].CacheMode == "" { + log.Printf("Disk %d cache mode not set, using default 'none'", idx) + c.Disks[idx].CacheMode = "none" + } + } + + errs = packer.MultiErrorAppend(errs, c.Comm.Prepare(&c.ctx)...) 
+ errs = packer.MultiErrorAppend(errs, c.BootConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.HTTPConfig.Prepare(&c.ctx)...) + + // Required configurations that will display errors if not set + if c.Username == "" { + errs = packer.MultiErrorAppend(errs, errors.New("username must be specified")) + } + if c.Password == "" { + errs = packer.MultiErrorAppend(errs, errors.New("password must be specified")) + } + if c.ProxmoxURLRaw == "" { + errs = packer.MultiErrorAppend(errs, errors.New("proxmox_url must be specified")) + } + if c.ProxmoxURL, err = url.Parse(c.ProxmoxURLRaw); err != nil { + errs = packer.MultiErrorAppend(errs, errors.New(fmt.Sprintf("Could not parse proxmox_url: %s", err))) + } + if c.ISOFile == "" { + errs = packer.MultiErrorAppend(errs, errors.New("iso_file must be specified")) + } + if c.Node == "" { + errs = packer.MultiErrorAppend(errs, errors.New("node must be specified")) + } + for idx := range c.NICs { + if c.NICs[idx].Bridge == "" { + errs = packer.MultiErrorAppend(errs, errors.New(fmt.Sprintf("network_adapters[%d].bridge must be specified", idx))) + } + } + for idx := range c.Disks { + if c.Disks[idx].StoragePool == "" { + errs = packer.MultiErrorAppend(errs, errors.New(fmt.Sprintf("disks[%d].storage_pool must be specified", idx))) + } + if c.Disks[idx].StoragePoolType == "" { + errs = packer.MultiErrorAppend(errs, errors.New(fmt.Sprintf("disks[%d].storage_pool_type must be specified", idx))) + } + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, nil, errs + } + + packer.LogSecretFilter.Set(c.Password) + return c, nil, nil +} diff --git a/builder/proxmox/step_convert_to_template.go b/builder/proxmox/step_convert_to_template.go new file mode 100644 index 000000000..b19ec6fa8 --- /dev/null +++ b/builder/proxmox/step_convert_to_template.go @@ -0,0 +1,46 @@ +package proxmox + +import ( + "context" + "fmt" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + 
"github.com/hashicorp/packer/packer" +) + +// stepConvertToTemplate takes the running VM configured in earlier steps, stops it, and +// converts it into a Proxmox template. +// +// It sets the template_id state which is used for Artifact lookup. +type stepConvertToTemplate struct{} + +func (s *stepConvertToTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + client := state.Get("proxmoxClient").(*proxmox.Client) + vmRef := state.Get("vmRef").(*proxmox.VmRef) + + ui.Say("Stopping VM") + _, err := client.ShutdownVm(vmRef) + if err != nil { + err := fmt.Errorf("Error converting VM to template, could not stop: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + ui.Say("Converting VM to template") + err = client.CreateTemplate(vmRef) + if err != nil { + err := fmt.Errorf("Error converting VM to template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + state.Put("template_id", vmRef.VmId()) + + return multistep.ActionContinue +} + +func (s *stepConvertToTemplate) Cleanup(state multistep.StateBag) {} diff --git a/builder/proxmox/step_finalize_template_config.go b/builder/proxmox/step_finalize_template_config.go new file mode 100644 index 000000000..ab74b245b --- /dev/null +++ b/builder/proxmox/step_finalize_template_config.go @@ -0,0 +1,65 @@ +package proxmox + +import ( + "context" + "fmt" + "strings" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +// stepFinalizeTemplateConfig does any required modifications to the configuration _after_ +// the VM has been converted into a template, such as updating name and description, or +// unmounting the installation ISO. 
+type stepFinalizeTemplateConfig struct{} + +func (s *stepFinalizeTemplateConfig) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + client := state.Get("proxmoxClient").(*proxmox.Client) + c := state.Get("config").(*Config) + vmRef := state.Get("vmRef").(*proxmox.VmRef) + + changes := make(map[string]interface{}) + + if c.TemplateName != "" { + changes["name"] = c.TemplateName + } + + // During build, the description is "Packer ephemeral build VM", so if no description is + // set, we need to clear it + changes["description"] = c.TemplateDescription + + if c.UnmountISO { + vmParams, err := client.GetVmConfig(vmRef) + if err != nil { + err := fmt.Errorf("Error fetching template config: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if !strings.HasSuffix(vmParams["ide2"].(string), "media=cdrom") { + err := fmt.Errorf("Cannot eject ISO from cdrom drive, ide2 is not present, or not a cdrom media") + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + changes["ide2"] = "none,media=cdrom" + } + + if len(changes) > 0 { + _, err := client.SetVmConfig(vmRef, changes) + if err != nil { + err := fmt.Errorf("Error updating template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + + return multistep.ActionContinue +} + +func (s *stepFinalizeTemplateConfig) Cleanup(state multistep.StateBag) {} diff --git a/builder/proxmox/step_start_vm.go b/builder/proxmox/step_start_vm.go new file mode 100644 index 000000000..f3a839c11 --- /dev/null +++ b/builder/proxmox/step_start_vm.go @@ -0,0 +1,143 @@ +package proxmox + +import ( + "context" + "fmt" + "log" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +// stepStartVM takes the given configuration and starts a VM on the given Proxmox node. 
+// +// It sets the vmRef state which is used throughout the later steps to reference the VM +// in API calls. +type stepStartVM struct{} + +func (s *stepStartVM) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + client := state.Get("proxmoxClient").(*proxmox.Client) + c := state.Get("config").(*Config) + + ui.Say("Creating VM") + config := proxmox.ConfigQemu{ + Name: c.VMName, + Agent: "1", + Description: "Packer ephemeral build VM", + Memory: c.Memory, + QemuCores: c.Cores, + QemuSockets: c.Sockets, + QemuOs: c.OS, + QemuIso: c.ISOFile, + QemuNetworks: generateProxmoxNetworkAdapters(c.NICs), + QemuDisks: generateProxmoxDisks(c.Disks), + } + + if c.VMID == 0 { + ui.Say("No VM ID given, getting next free from Proxmox") + for n := 0; n < 5; n++ { + id, err := proxmox.MaxVmId(client) + if err != nil { + log.Printf("Error getting max used VM ID: %v (attempt %d/5)", err, n+1) + continue + } + c.VMID = id + 1 + break + } + if c.VMID == 0 { + err := fmt.Errorf("Failed to get free VM ID") + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + } + vmRef := proxmox.NewVmRef(c.VMID) + vmRef.SetNode(c.Node) + + err := config.CreateVm(vmRef, client) + if err != nil { + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Store the vm id for later + state.Put("vmRef", vmRef) + + ui.Say("Starting VM") + _, err = client.StartVm(vmRef) + if err != nil { + err := fmt.Errorf("Error starting VM: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func generateProxmoxNetworkAdapters(nics []nicConfig) proxmox.QemuDevices { + devs := make(proxmox.QemuDevices) + for idx := range nics { + devs[idx] = make(proxmox.QemuDevice) + setDeviceParamIfDefined(devs[idx], "model", nics[idx].Model) + setDeviceParamIfDefined(devs[idx], "macaddr", nics[idx].MACAddress) + 
setDeviceParamIfDefined(devs[idx], "bridge", nics[idx].Bridge) + setDeviceParamIfDefined(devs[idx], "tag", nics[idx].VLANTag) + } + return devs +} +func generateProxmoxDisks(disks []diskConfig) proxmox.QemuDevices { + devs := make(proxmox.QemuDevices) + for idx := range disks { + devs[idx] = make(proxmox.QemuDevice) + setDeviceParamIfDefined(devs[idx], "type", disks[idx].Type) + setDeviceParamIfDefined(devs[idx], "size", disks[idx].Size) + setDeviceParamIfDefined(devs[idx], "storage", disks[idx].StoragePool) + setDeviceParamIfDefined(devs[idx], "storage_type", disks[idx].StoragePoolType) + setDeviceParamIfDefined(devs[idx], "cache", disks[idx].CacheMode) + setDeviceParamIfDefined(devs[idx], "format", disks[idx].DiskFormat) + } + return devs +} + +func setDeviceParamIfDefined(dev proxmox.QemuDevice, key, value string) { + if value != "" { + dev[key] = value + } +} + +func (s *stepStartVM) Cleanup(state multistep.StateBag) { + vmRefUntyped, ok := state.GetOk("vmRef") + // If not ok, we probably errored out before creating the VM + if !ok { + return + } + vmRef := vmRefUntyped.(*proxmox.VmRef) + + // The vmRef will actually refer to the created template if everything + // finished successfully, so in that case we shouldn't cleanup + if _, ok := state.GetOk("success"); ok { + return + } + + client := state.Get("proxmoxClient").(*proxmox.Client) + ui := state.Get("ui").(packer.Ui) + + // Destroy the server we just created + ui.Say("Stopping VM") + _, err := client.StopVm(vmRef) + if err != nil { + ui.Error(fmt.Sprintf("Error stop VM. Please stop and delete it manually: %s", err)) + return + } + + ui.Say("Deleting VM") + _, err = client.DeleteVm(vmRef) + if err != nil { + ui.Error(fmt.Sprintf("Error deleting VM. 
Please delete it manually: %s", err)) + return + } +} diff --git a/builder/proxmox/step_success.go b/builder/proxmox/step_success.go new file mode 100644 index 000000000..06de9f21d --- /dev/null +++ b/builder/proxmox/step_success.go @@ -0,0 +1,22 @@ +package proxmox + +import ( + "context" + + "github.com/hashicorp/packer/helper/multistep" +) + +// stepSuccess runs after the full build has succeeded. +// +// It sets the success state, which ensures cleanup does not remove the finished template +type stepSuccess struct{} + +func (s *stepSuccess) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + // We need to ensure stepStartVM.Cleanup doesn't delete the template (no + // difference between VMs and templates when deleting) + state.Put("success", true) + + return multistep.ActionContinue +} + +func (s *stepSuccess) Cleanup(state multistep.StateBag) {} diff --git a/builder/proxmox/step_type_boot_command.go b/builder/proxmox/step_type_boot_command.go new file mode 100644 index 000000000..f76cd5e52 --- /dev/null +++ b/builder/proxmox/step_type_boot_command.go @@ -0,0 +1,110 @@ +package proxmox + +import ( + "context" + "errors" + "fmt" + "log" + "net" + "time" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/common/bootcommand" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" +) + +// stepTypeBootCommand takes the started VM, and sends the keystrokes required to start +// the installation process such that Packer can later reach the VM over SSH/WinRM +type stepTypeBootCommand struct { + bootcommand.BootConfig + Ctx interpolate.Context +} + +type bootCommandTemplateData struct { + HTTPIP string + HTTPPort uint +} + +func (s *stepTypeBootCommand) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + c := 
state.Get("config").(*Config) + client := state.Get("proxmoxClient").(*proxmox.Client) + vmRef := state.Get("vmRef").(*proxmox.VmRef) + + if len(s.BootCommand) == 0 { + log.Println("No boot command given, skipping") + return multistep.ActionContinue + } + + if int64(s.BootWait) > 0 { + ui.Say(fmt.Sprintf("Waiting %s for boot", s.BootWait.String())) + select { + case <-time.After(s.BootWait): + break + case <-ctx.Done(): + return multistep.ActionHalt + } + } + + httpIP, err := hostIP() + if err != nil { + err := fmt.Errorf("Failed to determine host IP: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + common.SetHTTPIP(httpIP) + s.Ctx.Data = &bootCommandTemplateData{ + HTTPIP: httpIP, + HTTPPort: state.Get("http_port").(uint), + } + + ui.Say("Typing the boot command") + d := NewProxmoxDriver(client, vmRef, c.BootKeyInterval) + command, err := interpolate.Render(s.FlatBootCommand(), &s.Ctx) + if err != nil { + err := fmt.Errorf("Error preparing boot command: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + seq, err := bootcommand.GenerateExpressionSequence(command) + if err != nil { + err := fmt.Errorf("Error generating boot command: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if err := seq.Do(ctx, d); err != nil { + err := fmt.Errorf("Error running boot command: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (*stepTypeBootCommand) Cleanup(multistep.StateBag) {} + +func hostIP() (string, error) { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "", err + } + + for _, addr := range addrs { + if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String(), nil + } + } + } + + return "", errors.New("No host IP found") +} diff --git a/command/plugin.go 
b/command/plugin.go index b0b7149c5..054988726 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -40,6 +40,7 @@ import ( parallelsisobuilder "github.com/hashicorp/packer/builder/parallels/iso" parallelspvmbuilder "github.com/hashicorp/packer/builder/parallels/pvm" profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks" + proxmoxbuilder "github.com/hashicorp/packer/builder/proxmox" qemubuilder "github.com/hashicorp/packer/builder/qemu" scalewaybuilder "github.com/hashicorp/packer/builder/scaleway" tencentcloudcvmbuilder "github.com/hashicorp/packer/builder/tencentcloud/cvm" @@ -117,6 +118,7 @@ var Builders = map[string]packer.Builder{ "parallels-iso": new(parallelsisobuilder.Builder), "parallels-pvm": new(parallelspvmbuilder.Builder), "profitbricks": new(profitbricksbuilder.Builder), + "proxmox": new(proxmoxbuilder.Builder), "qemu": new(qemubuilder.Builder), "scaleway": new(scalewaybuilder.Builder), "tencentcloud-cvm": new(tencentcloudcvmbuilder.Builder), From 1d16b4ec411a99c77a222b1747763104d6169665 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Wed, 13 Mar 2019 19:35:50 +0100 Subject: [PATCH 30/47] Add docs --- website/source/docs/builders/proxmox.html.md | 201 +++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 website/source/docs/builders/proxmox.html.md diff --git a/website/source/docs/builders/proxmox.html.md b/website/source/docs/builders/proxmox.html.md new file mode 100644 index 000000000..f5c4fcb56 --- /dev/null +++ b/website/source/docs/builders/proxmox.html.md @@ -0,0 +1,201 @@ +--- +description: | + The proxmox Packer builder is able to create new images for use with + Proxmox VE. The builder takes an ISO source, runs any provisioning + necessary on the image after launching it, then creates a virtual machine + template. 
+layout: docs +page_title: 'Proxmox - Builders' +sidebar_current: 'docs-builders-proxmox' +--- + +# Proxmox Builder + +Type: `proxmox` + +The `proxmox` Packer builder is able to create new images for use with +[Proxmox](https://www.proxmox.com/en/proxmox-ve). The builder takes an ISO +image, runs any provisioning necessary on the image after launching it, then +creates a virtual machine template. This template can then be used as to +create new virtual machines within Proxmox. + +The builder does *not* manage templates. Once it creates a template, it is up +to you to use it or delete it. + +## Configuration Reference + +There are many configuration options available for the builder. They are +segmented below into two categories: required and optional parameters. Within +each category, the available configuration keys are alphabetized. + +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) can be configured for this +builder. + +### Required: + +- `proxmox_url` (string) - URL to the Proxmox API, including the full path, + so `https://:/api2/json` for example. + Can also be set via the `PROXMOX_URL` environment variable. + +- `username` (string) - Username when authenticating to Proxmox, including + the realm. For example `user@pve` to use the local Proxmox realm. + Can also be set via the `PROXMOX_USERNAME` environment variable. + +- `password` (string) - Password for the user. + Can also be set via the `PROXMOX_PASSWORD` environment variable. + +- `node` (string) - Which node in the Proxmox cluster to start the virtual + machine on during creation. + +- `iso_file` (string) - Path to the ISO file to boot from, expressed as a + proxmox datastore path, for example + `local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso` + +### Optional: +- `insecure_skip_tls_verify` (bool) - Skip validating the certificate. + +- `vm_name` (string) - Name of the virtual machine during creation. If not + given, a random uuid will be used. 
+ +- `vm_id` (int) - The ID used to reference the virtual machine. This will + also be the ID of the final template. If not given, the next free ID on + the node will be used. + +- `memory` (int) - How much memory, in megabytes, to give the virtual + machine. Defaults to `512`. + +- `cores` (int) - How many CPU cores to give the virtual machine. Defaults + to `1`. + +- `sockets` (int) - How many CPU sockets to give the virtual machine. + Defaults to `1` + +- `os` (string) - The operating system. Can be `linux`, `windows`, `solaris` + or `other`. Defaults to `other`. + +- `network_adapters` (array of objects) - Network adapters attached to the + virtual machine. Example: + + ```json + [ + { + "model": "virtio", + "bridge": "vmbr0", + "vlan_tag": "10" + } + ] + ``` + + - `bridge` (string) - Required. Which Proxmox bridge to attach the + adapter to. + + - `model` (string) - Model of the virtual network adapter. Can be + `rtl8139`, `ne2k_pci`, `e1000`, `pcnet`, `virtio`, `ne2k_isa`, + `i82551`, `i82557b`, `i82559er`, `vmxnet3`, `e1000-82540em`, + `e1000-82544gc` or `e1000-82545em`. Defaults to `e1000`. + + - `mac_address` (string) - Give the adapter a specific MAC address. If + not set, defaults to a random MAC. + + - `vlan_tag` (string) - If the adapter should tag packets. Defaults to + no tagging. + +- `disks` (array of objects) - Disks attached to the virtual machine. + Example: + + ```json + [ + { + "type": "scsi", + "size": "5G", + "storage_pool": "local-lvm", + "storage_pool_type": "lvm" + } + ] + ``` + + - `storage_pool` (string) - Required. Name of the Proxmox storage pool + to store the virtual machine disk on. A `local-lvm` pool is allocated + by the installer, for example. + + - `storage_pool_type` (string) - Required. The type of the pool, can + be `lvm`, `lvm-thin`, `zfs` or `directory`. + + - `type` (string) - The type of disk. Can be `scsi`, `sata`, `virtio` or + `ide`. Defaults to `scsi`. 
+ + - `size` (string) - The size of the disk, including a unit suffix, such + as `10G` to indicate 10 gigabytes. + + - `cache_mode` (string) - How to cache operations to the disk. Can be + `none`, `writethrough`, `writeback`, `unsafe` or `directsync`. + Defaults to `none`. + + - `format` (string) - The format of the file backing the disk. Can be + `raw`, `cow`, `qcow`, `qed`, `qcow2`, `vmdk` or `cloop`. Defaults to + `raw`. + +- `template_name` (string) - Name of the template. Defaults to the generated + name used during creation. + +- `template_description` (string) - Description of the template, visible in + the Proxmox interface. + +- `unmount_iso` (bool) - If true, remove the mounted ISO from the template + after finishing. Defaults to `false`. + + +## Example: Fedora with kickstart + +Here is a basic example creating a Fedora 29 server image with a Kickstart +file served with Packer's HTTP server. Note that the iso file needs to be +manually downloaded. + +``` json +{ + "variables": { + "username": "apiuser@pve", + "password": "supersecret" + }, + "builders": [ + { + "type": "proxmox", + "proxmox_url": "https://my-proxmox.my-domain:8006/api2/json", + "insecure_skip_tls_verify": true, + "username": "{{user `username`}}", + "password": "{{user `password`}}", + + "node": "my-proxmox", + "network_adapters": [ + { + "bridge": "vmbr0" + } + ], + "disks": [ + { + "type": "scsi", + "size": "5G", + "storage_pool": "local-lvm", + "storage_pool_type": "lvm" + } + ], + + "iso_file": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso", + "http_directory":"config", + "boot_wait": "10s", + "boot_command": [ + " ip=dhcp inst.cmdline inst.ks=http://{{.HTTPIP}}:{{.HTTPPort}}/ks.cfg" + ], + + "ssh_username": "root", + "ssh_timeout": "15m", + "ssh_password": "packer", + + "unmount_iso": true, + "template_name": "fedora-29", + "template_description": "Fedora 29-1.2, generated on {{ isotime \"2006-01-02T15:04:05Z\" }}" + } + ] +} +``` From 2e3086be5a1438557421cd70892d3e0ea1647dee Mon 
Sep 17 00:00:00 2001 From: Calle Pettersson Date: Wed, 13 Mar 2019 20:51:11 +0100 Subject: [PATCH 31/47] Initial tests --- builder/proxmox/config_test.go | 115 +++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 builder/proxmox/config_test.go diff --git a/builder/proxmox/config_test.go b/builder/proxmox/config_test.go new file mode 100644 index 000000000..a7496dddd --- /dev/null +++ b/builder/proxmox/config_test.go @@ -0,0 +1,115 @@ +package proxmox + +import ( + "strings" + "testing" + + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template" +) + +func TestRequiredParameters(t *testing.T) { + _, _, err := NewConfig(make(map[string]interface{})) + if err == nil { + t.Fatal("Expected empty configuration to fail") + } + errs, ok := err.(*packer.MultiError) + if !ok { + t.Fatal("Expected errors to be packer.MultiError") + } + + required := []string{"username", "password", "proxmox_url", "iso_file", "node", "ssh_username"} + for _, param := range required { + found := false + for _, err := range errs.Errors { + if strings.Contains(err.Error(), param) { + found = true + break + } + } + if !found { + t.Errorf("Expected error about missing parameter %q", required) + } + } +} + +func TestBasicExampleFromDocsIsValid(t *testing.T) { + const config = `{ + "builders": [ + { + "type": "proxmox", + "proxmox_url": "https://my-proxmox.my-domain:8006/api2/json", + "insecure_skip_tls_verify": true, + "username": "apiuser@pve", + "password": "supersecret", + + "node": "my-proxmox", + "network_adapters": [ + { + "bridge": "vmbr0" + } + ], + "disks": [ + { + "type": "scsi", + "size": "5G", + "storage_pool": "local-lvm", + "storage_pool_type": "lvm" + } + ], + + "iso_file": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso", + "http_directory":"config", + "boot_wait": "10s", + "boot_command": [ + " ip=dhcp inst.cmdline inst.ks=http://{{.HTTPIP}}:{{.HTTPPort}}/ks.cfg" + ], + + "ssh_username": "root", + "ssh_timeout": "15m", + 
"ssh_password": "packer", + + "unmount_iso": true, + "template_name": "fedora-29", + "template_description": "Fedora 29-1.2, generated on {{ isotime \"2006-01-02T15:04:05Z\" }}" + } + ] +}` + tpl, err := template.Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + b := &Builder{} + warn, err := b.Prepare(tpl.Builders["proxmox"].Config) + if err != nil { + t.Fatal(err, warn) + } + + // The example config does not set a number of optional fields. Validate that: + // Memory 0 is too small, using default: 512 + // Number of cores 0 is too small, using default: 1 + // Number of sockets 0 is too small, using default: 1 + // OS not set, using default 'other' + // NIC 0 model not set, using default 'e1000' + // Disk 0 cache mode not set, using default 'none' + + if b.config.Memory != 512 { + t.Errorf("Expected Memory to be 512, got %d", b.config.Memory) + } + if b.config.Cores != 1 { + t.Errorf("Expected Cores to be 1, got %d", b.config.Cores) + } + if b.config.Sockets != 1 { + t.Errorf("Expected Sockets to be 1, got %d", b.config.Sockets) + } + if b.config.OS != "other" { + t.Errorf("Expected OS to be 'other', got %s", b.config.OS) + } + if b.config.NICs[0].Model != "e1000" { + t.Errorf("Expected NIC model to be 'e1000', got %s", b.config.NICs[0].Model) + } + if b.config.Disks[0].CacheMode != "none" { + t.Errorf("Expected disk cache mode to be 'none', got %s", b.config.Disks[0].CacheMode) + } +} From 5eb600bf88a47976bf52d340d158f573f9c63cfc Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Wed, 13 Mar 2019 21:21:14 +0100 Subject: [PATCH 32/47] Add draft of step test --- .../proxmox/step_finalize_template_config.go | 7 ++- .../step_finalize_template_config_test.go | 63 +++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 builder/proxmox/step_finalize_template_config_test.go diff --git a/builder/proxmox/step_finalize_template_config.go b/builder/proxmox/step_finalize_template_config.go index ab74b245b..2e442654a 
100644 --- a/builder/proxmox/step_finalize_template_config.go +++ b/builder/proxmox/step_finalize_template_config.go @@ -15,9 +15,14 @@ import ( // unmounting the installation ISO. type stepFinalizeTemplateConfig struct{} +type templateFinalizer interface { + GetVmConfig(*proxmox.VmRef) (map[string]interface{}, error) + SetVmConfig(*proxmox.VmRef, map[string]interface{}) (string, error) +} + func (s *stepFinalizeTemplateConfig) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - client := state.Get("proxmoxClient").(*proxmox.Client) + client := state.Get("proxmoxClient").(templateFinalizer) c := state.Get("config").(*Config) vmRef := state.Get("vmRef").(*proxmox.VmRef) diff --git a/builder/proxmox/step_finalize_template_config_test.go b/builder/proxmox/step_finalize_template_config_test.go new file mode 100644 index 000000000..1f0a82541 --- /dev/null +++ b/builder/proxmox/step_finalize_template_config_test.go @@ -0,0 +1,63 @@ +package proxmox + +import ( + "context" + "testing" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type finalizerMock struct { + getConfig func() (map[string]interface{}, error) + setConfig func(map[string]interface{}) (string, error) +} + +func (m finalizerMock) GetVmConfig(*proxmox.VmRef) (map[string]interface{}, error) { + return m.getConfig() +} +func (m finalizerMock) SetVmConfig(vmref *proxmox.VmRef, c map[string]interface{}) (string, error) { + return m.setConfig(c) +} + +func TestTemplateFinalize(t *testing.T) { + finalizer := finalizerMock{ + getConfig: func() (map[string]interface{}, error) { + return map[string]interface{}{ + "name": "dummy", + "description": "Packer ephemeral build VM", + "ide2": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso,media=cdrom", + }, nil + }, + setConfig: func(c map[string]interface{}) (string, error) { + if c["name"] != "my-template" { + 
t.Errorf("Expected name to be my-template, got %q", c["name"]) + } + if c["description"] != "foo" { + t.Errorf("Expected description to be foo, got %q", c["description"]) + } + if c["ide2"] != "none,media=cdrom" { + t.Errorf("Expected ide2 to be none,media=cdrom, got %q", c["ide2"]) + } + + return "", nil + }, + } + + state := new(multistep.BasicStateBag) + state.Put("ui", packer.TestUi(t)) + state.Put("config", &Config{ + TemplateName: "my-template", + TemplateDescription: "foo", + UnmountISO: true, + }) + state.Put("vmRef", proxmox.NewVmRef(1)) + state.Put("proxmoxClient", finalizer) + + step := stepFinalizeTemplateConfig{} + action := step.Run(context.TODO(), state) + if action != multistep.ActionContinue { + t.Error("Expected action to be Continue, got Halt") + } +} From 3d5f433b225eb46283fe8ce0d29fac3f06d1d0c9 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 20:21:46 +0100 Subject: [PATCH 33/47] Add more finalizetemplate tests, fix found bug --- .../proxmox/step_finalize_template_config.go | 2 +- .../step_finalize_template_config_test.go | 144 ++++++++++++++---- 2 files changed, 116 insertions(+), 30 deletions(-) diff --git a/builder/proxmox/step_finalize_template_config.go b/builder/proxmox/step_finalize_template_config.go index 2e442654a..c6de188ef 100644 --- a/builder/proxmox/step_finalize_template_config.go +++ b/builder/proxmox/step_finalize_template_config.go @@ -45,7 +45,7 @@ func (s *stepFinalizeTemplateConfig) Run(ctx context.Context, state multistep.St return multistep.ActionHalt } - if !strings.HasSuffix(vmParams["ide2"].(string), "media=cdrom") { + if vmParams["ide2"] == nil || !strings.HasSuffix(vmParams["ide2"].(string), "media=cdrom") { err := fmt.Errorf("Cannot eject ISO from cdrom drive, ide2 is not present, or not a cdrom media") state.Put("error", err) ui.Error(err.Error()) diff --git a/builder/proxmox/step_finalize_template_config_test.go b/builder/proxmox/step_finalize_template_config_test.go index 
1f0a82541..58de6c966 100644 --- a/builder/proxmox/step_finalize_template_config_test.go +++ b/builder/proxmox/step_finalize_template_config_test.go @@ -2,6 +2,7 @@ package proxmox import ( "context" + "fmt" "testing" "github.com/Telmate/proxmox-api-go/proxmox" @@ -22,42 +23,127 @@ func (m finalizerMock) SetVmConfig(vmref *proxmox.VmRef, c map[string]interface{ } func TestTemplateFinalize(t *testing.T) { - finalizer := finalizerMock{ - getConfig: func() (map[string]interface{}, error) { - return map[string]interface{}{ + cs := []struct { + name string + builderConfig *Config + initialVMConfig map[string]interface{} + getConfigErr error + expectCallSetConfig bool + expectedVMConfig map[string]interface{} + setConfigErr error + expectedAction multistep.StepAction + }{ + { + name: "empty config changes only description", + builderConfig: &Config{}, + initialVMConfig: map[string]interface{}{ "name": "dummy", "description": "Packer ephemeral build VM", "ide2": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso,media=cdrom", - }, nil + }, + expectCallSetConfig: true, + expectedVMConfig: map[string]interface{}{ + "name": nil, + "description": "", + "ide2": nil, + }, + expectedAction: multistep.ActionContinue, }, - setConfig: func(c map[string]interface{}) (string, error) { - if c["name"] != "my-template" { - t.Errorf("Expected name to be my-template, got %q", c["name"]) - } - if c["description"] != "foo" { - t.Errorf("Expected description to be foo, got %q", c["description"]) - } - if c["ide2"] != "none,media=cdrom" { - t.Errorf("Expected ide2 to be none,media=cdrom, got %q", c["ide2"]) - } - - return "", nil + { + name: "all options", + builderConfig: &Config{ + TemplateName: "my-template", + TemplateDescription: "some-description", + UnmountISO: true, + }, + initialVMConfig: map[string]interface{}{ + "name": "dummy", + "description": "Packer ephemeral build VM", + "ide2": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso,media=cdrom", + }, + expectCallSetConfig: true, + 
expectedVMConfig: map[string]interface{}{ + "name": "my-template", + "description": "some-description", + "ide2": "none,media=cdrom", + }, + expectedAction: multistep.ActionContinue, + }, + { + name: "no cd-drive with unmount=true should returns halt", + builderConfig: &Config{ + TemplateName: "my-template", + TemplateDescription: "some-description", + UnmountISO: true, + }, + initialVMConfig: map[string]interface{}{ + "name": "dummy", + "description": "Packer ephemeral build VM", + "ide1": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso,media=cdrom", + }, + expectCallSetConfig: false, + expectedAction: multistep.ActionHalt, + }, + { + name: "GetVmConfig error should return halt", + builderConfig: &Config{ + TemplateName: "my-template", + TemplateDescription: "some-description", + UnmountISO: true, + }, + getConfigErr: fmt.Errorf("some error"), + expectCallSetConfig: false, + expectedAction: multistep.ActionHalt, + }, + { + name: "SetVmConfig error should return halt", + builderConfig: &Config{ + TemplateName: "my-template", + TemplateDescription: "some-description", + UnmountISO: true, + }, + initialVMConfig: map[string]interface{}{ + "name": "dummy", + "description": "Packer ephemeral build VM", + "ide2": "local:iso/Fedora-Server-dvd-x86_64-29-1.2.iso,media=cdrom", + }, + expectCallSetConfig: true, + setConfigErr: fmt.Errorf("some error"), + expectedAction: multistep.ActionHalt, }, } - state := new(multistep.BasicStateBag) - state.Put("ui", packer.TestUi(t)) - state.Put("config", &Config{ - TemplateName: "my-template", - TemplateDescription: "foo", - UnmountISO: true, - }) - state.Put("vmRef", proxmox.NewVmRef(1)) - state.Put("proxmoxClient", finalizer) + for _, c := range cs { + t.Run(c.name, func(t *testing.T) { + finalizer := finalizerMock{ + getConfig: func() (map[string]interface{}, error) { + return c.initialVMConfig, c.getConfigErr + }, + setConfig: func(cfg map[string]interface{}) (string, error) { + if !c.expectCallSetConfig { + t.Error("Did not expect 
SetVmConfig to be called") + } + for key, val := range c.expectedVMConfig { + if cfg[key] != val { + t.Errorf("Expected %q to be %q, got %q", key, val, cfg[key]) + } + } - step := stepFinalizeTemplateConfig{} - action := step.Run(context.TODO(), state) - if action != multistep.ActionContinue { - t.Error("Expected action to be Continue, got Halt") + return "", c.setConfigErr + }, + } + + state := new(multistep.BasicStateBag) + state.Put("ui", packer.TestUi(t)) + state.Put("config", c.builderConfig) + state.Put("vmRef", proxmox.NewVmRef(1)) + state.Put("proxmoxClient", finalizer) + + step := stepFinalizeTemplateConfig{} + action := step.Run(context.TODO(), state) + if action != c.expectedAction { + t.Errorf("Expected action to be %v, got %v", c.expectedAction, action) + } + }) } } From 28ca0f71b599fa0e7e45ac0e358655e4737a8b6a Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 20:41:52 +0100 Subject: [PATCH 34/47] Add tests for step_convert_to_template --- builder/proxmox/step_convert_to_template.go | 7 +- .../proxmox/step_convert_to_template_test.go | 101 ++++++++++++++++++ 2 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 builder/proxmox/step_convert_to_template_test.go diff --git a/builder/proxmox/step_convert_to_template.go b/builder/proxmox/step_convert_to_template.go index b19ec6fa8..9e514f3d6 100644 --- a/builder/proxmox/step_convert_to_template.go +++ b/builder/proxmox/step_convert_to_template.go @@ -15,9 +15,14 @@ import ( // It sets the template_id state which is used for Artifact lookup. 
type stepConvertToTemplate struct{} +type templateConverter interface { + ShutdownVm(*proxmox.VmRef) (string, error) + CreateTemplate(*proxmox.VmRef) error +} + func (s *stepConvertToTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - client := state.Get("proxmoxClient").(*proxmox.Client) + client := state.Get("proxmoxClient").(templateConverter) vmRef := state.Get("vmRef").(*proxmox.VmRef) ui.Say("Stopping VM") diff --git a/builder/proxmox/step_convert_to_template_test.go b/builder/proxmox/step_convert_to_template_test.go new file mode 100644 index 000000000..7df852627 --- /dev/null +++ b/builder/proxmox/step_convert_to_template_test.go @@ -0,0 +1,101 @@ +package proxmox + +import ( + "context" + "fmt" + "testing" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type converterMock struct { + shutdownVm func(*proxmox.VmRef) (string, error) + createTemplate func(*proxmox.VmRef) error +} + +func (m converterMock) ShutdownVm(r *proxmox.VmRef) (string, error) { + return m.shutdownVm(r) +} +func (m converterMock) CreateTemplate(r *proxmox.VmRef) error { + return m.createTemplate(r) +} + +func TestConvertToTemplate(t *testing.T) { + cs := []struct { + name string + shutdownErr error + expectCallCreateTemplate bool + createTemplateErr error + expectedAction multistep.StepAction + expectTemplateIdSet bool + }{ + { + name: "no errors returns continue and sets template id", + expectCallCreateTemplate: true, + expectedAction: multistep.ActionContinue, + expectTemplateIdSet: true, + }, + { + name: "when shutdown fails, don't try to create template and halt", + shutdownErr: fmt.Errorf("failed to stop vm"), + expectCallCreateTemplate: false, + expectedAction: multistep.ActionHalt, + expectTemplateIdSet: false, + }, + { + name: "when create template fails, halt", + expectCallCreateTemplate: true, + createTemplateErr: 
fmt.Errorf("failed to stop vm"), + expectedAction: multistep.ActionHalt, + expectTemplateIdSet: false, + }, + } + + const vmid = 123 + + for _, c := range cs { + t.Run(c.name, func(t *testing.T) { + converter := converterMock{ + shutdownVm: func(r *proxmox.VmRef) (string, error) { + if r.VmId() != vmid { + t.Errorf("ShutdownVm called with unexpected id, expected %d, got %d", vmid, r.VmId()) + } + return "", c.shutdownErr + }, + createTemplate: func(r *proxmox.VmRef) error { + if r.VmId() != vmid { + t.Errorf("CreateTemplate called with unexpected id, expected %d, got %d", vmid, r.VmId()) + } + if !c.expectCallCreateTemplate { + t.Error("Did not expect CreateTemplate to be called") + } + + return c.createTemplateErr + }, + } + + state := new(multistep.BasicStateBag) + state.Put("ui", packer.TestUi(t)) + state.Put("vmRef", proxmox.NewVmRef(vmid)) + state.Put("proxmoxClient", converter) + + step := stepConvertToTemplate{} + action := step.Run(context.TODO(), state) + if action != c.expectedAction { + t.Errorf("Expected action to be %v, got %v", c.expectedAction, action) + } + + id, wasSet := state.GetOk("template_id") + + if c.expectTemplateIdSet != wasSet { + t.Errorf("Expected template_id state present=%v was present=%v", c.expectTemplateIdSet, wasSet) + } + + if c.expectTemplateIdSet && id != vmid { + t.Errorf("Expected template_id state to be set to %d, got %v", vmid, id) + } + }) + } +} From 0765bc2283fc65abd454987929f31a5f264c4564 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 22:21:48 +0100 Subject: [PATCH 35/47] Add tests for step_type_boot_command, fix found bug (shifted chars were not lower cased) --- builder/proxmox/bootcommand_driver.go | 6 +- builder/proxmox/step_type_boot_command.go | 6 +- .../proxmox/step_type_boot_command_test.go | 124 ++++++++++++++++++ 3 files changed, 132 insertions(+), 4 deletions(-) create mode 100644 builder/proxmox/step_type_boot_command_test.go diff --git a/builder/proxmox/bootcommand_driver.go 
b/builder/proxmox/bootcommand_driver.go index b8debaafb..4eeb48f5f 100644 --- a/builder/proxmox/bootcommand_driver.go +++ b/builder/proxmox/bootcommand_driver.go @@ -11,14 +11,14 @@ import ( ) type proxmoxDriver struct { - client *proxmox.Client + client commandTyper vmRef *proxmox.VmRef specialMap map[string]string runeMap map[rune]string interval time.Duration } -func NewProxmoxDriver(c *proxmox.Client, vmRef *proxmox.VmRef, interval time.Duration) *proxmoxDriver { +func NewProxmoxDriver(c commandTyper, vmRef *proxmox.VmRef, interval time.Duration) *proxmoxDriver { // Mappings for packer shorthand to qemu qkeycodes sMap := map[string]string{ "spacebar": "spc", @@ -90,7 +90,7 @@ func (p *proxmoxDriver) SendKey(key rune, action bootcommand.KeyAction) error { var keys string if keyShift { - keys = fmt.Sprintf(shiftFormat, key) + keys = fmt.Sprintf(shiftFormat, unicode.ToLower(key)) } else { keys = fmt.Sprintf("%c", key) } diff --git a/builder/proxmox/step_type_boot_command.go b/builder/proxmox/step_type_boot_command.go index f76cd5e52..1967b09ca 100644 --- a/builder/proxmox/step_type_boot_command.go +++ b/builder/proxmox/step_type_boot_command.go @@ -28,10 +28,14 @@ type bootCommandTemplateData struct { HTTPPort uint } +type commandTyper interface { + MonitorCmd(*proxmox.VmRef, string) (map[string]interface{}, error) +} + func (s *stepTypeBootCommand) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) c := state.Get("config").(*Config) - client := state.Get("proxmoxClient").(*proxmox.Client) + client := state.Get("proxmoxClient").(commandTyper) vmRef := state.Get("vmRef").(*proxmox.VmRef) if len(s.BootCommand) == 0 { diff --git a/builder/proxmox/step_type_boot_command_test.go b/builder/proxmox/step_type_boot_command_test.go new file mode 100644 index 000000000..efe8b4c4c --- /dev/null +++ b/builder/proxmox/step_type_boot_command_test.go @@ -0,0 +1,124 @@ +package proxmox + +import ( + "context" + "fmt" + 
"strings" + "testing" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/common/bootcommand" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type commandTyperMock struct { + monitorCmd func(*proxmox.VmRef, string) (map[string]interface{}, error) +} + +func (m commandTyperMock) MonitorCmd(ref *proxmox.VmRef, cmd string) (map[string]interface{}, error) { + return m.monitorCmd(ref, cmd) +} + +func TestTypeBootCommand(t *testing.T) { + cs := []struct { + name string + builderConfig *Config + expectCallMonitorCmd bool + monitorCmdErr error + monitorCmdRet map[string]interface{} + expectedKeysSent string + expectedAction multistep.StepAction + }{ + { + name: "simple boot command is typed", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"hello"}}}, + expectCallMonitorCmd: true, + expectedKeysSent: "hello", + expectedAction: multistep.ActionContinue, + }, + { + name: "interpolated boot command", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"helloworld"}}}, + expectCallMonitorCmd: true, + expectedKeysSent: "helloretworld", + expectedAction: multistep.ActionContinue, + }, + { + name: "merge multiple interpolated boot command", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"Hello World 2.0", "foo!bar@baz"}}}, + expectCallMonitorCmd: true, + expectedKeysSent: "shift-hellospcshift-worldspc2dot0fooshift-1barshift-2baz", + expectedAction: multistep.ActionContinue, + }, + { + name: "without boot command monitorcmd should not be called", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{}}}, + expectCallMonitorCmd: false, + expectedAction: multistep.ActionContinue, + }, + { + name: "invalid boot command template function", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"{{ foo }}"}}}, + expectCallMonitorCmd: false, + expectedAction: 
multistep.ActionHalt, + }, + { + // When proxmox (or Qemu, really) doesn't recognize the keycode we send, we get no error back, but + // a map {"data": "invalid parameter: X"}, where X is the keycode. + name: "invalid keys sent to proxmox", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"x"}}}, + expectCallMonitorCmd: true, + monitorCmdRet: map[string]interface{}{"data": "invalid parameter: x"}, + expectedKeysSent: "x", + expectedAction: multistep.ActionHalt, + }, + { + name: "error in typing should return halt", + builderConfig: &Config{BootConfig: bootcommand.BootConfig{BootCommand: []string{"hello"}}}, + expectCallMonitorCmd: true, + monitorCmdErr: fmt.Errorf("some error"), + expectedKeysSent: "h", + expectedAction: multistep.ActionHalt, + }, + } + + for _, c := range cs { + t.Run(c.name, func(t *testing.T) { + accumulator := strings.Builder{} + typer := commandTyperMock{ + monitorCmd: func(ref *proxmox.VmRef, cmd string) (map[string]interface{}, error) { + if !c.expectCallMonitorCmd { + t.Error("Did not expect MonitorCmd to be called") + } + if !strings.HasPrefix(cmd, "sendkey ") { + t.Errorf("Expected all commands to be sendkey, got %s", cmd) + } + + accumulator.WriteString(strings.TrimPrefix(cmd, "sendkey ")) + + return c.monitorCmdRet, c.monitorCmdErr + }, + } + + state := new(multistep.BasicStateBag) + state.Put("ui", packer.TestUi(t)) + state.Put("config", c.builderConfig) + state.Put("http_port", uint(0)) + state.Put("vmRef", proxmox.NewVmRef(1)) + state.Put("proxmoxClient", typer) + + step := stepTypeBootCommand{ + c.builderConfig.BootConfig, + c.builderConfig.ctx, + } + action := step.Run(context.TODO(), state) + if action != c.expectedAction { + t.Errorf("Expected action to be %v, got %v", c.expectedAction, action) + } + if c.expectedKeysSent != accumulator.String() { + t.Errorf("Expected keystrokes to be %q, got %q", c.expectedKeysSent, accumulator.String()) + } + }) + } +} From 
4c1fbfdd61dcf0cf8a1aadaca5513827ebb9dc06 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 22:26:55 +0100 Subject: [PATCH 36/47] Shifted special runes are already handled, simplify SendKey --- builder/proxmox/bootcommand_driver.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/builder/proxmox/bootcommand_driver.go b/builder/proxmox/bootcommand_driver.go index 4eeb48f5f..1570845fe 100644 --- a/builder/proxmox/bootcommand_driver.go +++ b/builder/proxmox/bootcommand_driver.go @@ -2,7 +2,6 @@ package proxmox import ( "fmt" - "strings" "time" "unicode" @@ -83,14 +82,9 @@ func (p *proxmoxDriver) SendKey(key rune, action bootcommand.KeyAction) error { return p.send(special) } - const shiftFormat = "shift-%c" - const shiftedChars = "~!@#$%^&*()_+{}|:\"<>?" // Copied from bootcommand/driver.go - - keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key) - var keys string - if keyShift { - keys = fmt.Sprintf(shiftFormat, unicode.ToLower(key)) + if unicode.IsUpper(key) { + keys = fmt.Sprintf("shift-%c", unicode.ToLower(key)) } else { keys = fmt.Sprintf("%c", key) } From c4ce295f6798ca728aaae1d72b4cf71d4b494111 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 22:28:07 +0100 Subject: [PATCH 37/47] Add tests for step_start_vm cleanup --- builder/proxmox/step_start_vm.go | 7 +- builder/proxmox/step_start_vm_test.go | 108 ++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 builder/proxmox/step_start_vm_test.go diff --git a/builder/proxmox/step_start_vm.go b/builder/proxmox/step_start_vm.go index f3a839c11..c65b8c968 100644 --- a/builder/proxmox/step_start_vm.go +++ b/builder/proxmox/step_start_vm.go @@ -109,6 +109,11 @@ func setDeviceParamIfDefined(dev proxmox.QemuDevice, key, value string) { } } +type startedVMCleaner interface { + StopVm(*proxmox.VmRef) (string, error) + DeleteVm(*proxmox.VmRef) (string, error) +} + func (s *stepStartVM) 
Cleanup(state multistep.StateBag) { vmRefUntyped, ok := state.GetOk("vmRef") // If not ok, we probably errored out before creating the VM @@ -123,7 +128,7 @@ func (s *stepStartVM) Cleanup(state multistep.StateBag) { return } - client := state.Get("proxmoxClient").(*proxmox.Client) + client := state.Get("proxmoxClient").(startedVMCleaner) ui := state.Get("ui").(packer.Ui) // Destroy the server we just created diff --git a/builder/proxmox/step_start_vm_test.go b/builder/proxmox/step_start_vm_test.go new file mode 100644 index 000000000..70f0aa37b --- /dev/null +++ b/builder/proxmox/step_start_vm_test.go @@ -0,0 +1,108 @@ +package proxmox + +import ( + "fmt" + "testing" + + "github.com/Telmate/proxmox-api-go/proxmox" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type startedVMCleanerMock struct { + stopVm func() (string, error) + deleteVm func() (string, error) +} + +var _ startedVMCleaner = &startedVMCleanerMock{} + +func (m startedVMCleanerMock) StopVm(*proxmox.VmRef) (string, error) { + return m.stopVm() +} +func (m startedVMCleanerMock) DeleteVm(*proxmox.VmRef) (string, error) { + return m.deleteVm() +} + +func TestCleanupStartVM(t *testing.T) { + cs := []struct { + name string + setVmRef bool + setSuccess bool + stopVMErr error + expectCallStopVM bool + deleteVMErr error + expectCallDeleteVM bool + }{ + { + name: "when vmRef state is not set, nothing should happen", + setVmRef: false, + expectCallStopVM: false, + }, + { + name: "when success state is set, nothing should happen", + setVmRef: true, + setSuccess: true, + expectCallStopVM: false, + }, + { + name: "when not successful, vm should be stopped and deleted", + setVmRef: true, + setSuccess: false, + expectCallStopVM: true, + expectCallDeleteVM: true, + }, + { + name: "if stopping fails, DeleteVm should not be called", + setVmRef: true, + setSuccess: false, + expectCallStopVM: true, + stopVMErr: fmt.Errorf("some error"), + expectCallDeleteVM: false, + }, + } + 
+ for _, c := range cs { + t.Run(c.name, func(t *testing.T) { + var stopWasCalled, deleteWasCalled bool + + cleaner := startedVMCleanerMock{ + stopVm: func() (string, error) { + if !c.expectCallStopVM { + t.Error("Did not expect StopVm to be called") + } + + stopWasCalled = true + return "", c.stopVMErr + }, + deleteVm: func() (string, error) { + if !c.expectCallDeleteVM { + t.Error("Did not expect DeleteVm to be called") + } + + deleteWasCalled = true + return "", c.deleteVMErr + }, + } + + state := new(multistep.BasicStateBag) + state.Put("ui", packer.TestUi(t)) + state.Put("proxmoxClient", cleaner) + if c.setVmRef { + state.Put("vmRef", proxmox.NewVmRef(1)) + } + if c.setSuccess { + state.Put("success", "true") + } + + step := stepStartVM{} + step.Cleanup(state) + + if c.expectCallStopVM && !stopWasCalled { + t.Error("Expected StopVm to be called, but it wasn't") + } + if c.expectCallDeleteVM && !deleteWasCalled { + t.Error("Expected DeleteVm to be called, but it wasn't") + } + }) + } +} From 2f754c38f8cd61d58d8e9dd3284a854c00b8a3f1 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Thu, 14 Mar 2019 22:32:59 +0100 Subject: [PATCH 38/47] Add validation of interface implementation for both proxmox.Client and mocks --- builder/proxmox/step_convert_to_template.go | 2 ++ builder/proxmox/step_convert_to_template_test.go | 2 ++ builder/proxmox/step_finalize_template_config.go | 4 +++- builder/proxmox/step_finalize_template_config_test.go | 4 +++- builder/proxmox/step_start_vm.go | 2 ++ builder/proxmox/step_start_vm_test.go | 4 ++-- builder/proxmox/step_type_boot_command.go | 2 ++ builder/proxmox/step_type_boot_command_test.go | 2 ++ 8 files changed, 18 insertions(+), 4 deletions(-) diff --git a/builder/proxmox/step_convert_to_template.go b/builder/proxmox/step_convert_to_template.go index 9e514f3d6..712badd81 100644 --- a/builder/proxmox/step_convert_to_template.go +++ b/builder/proxmox/step_convert_to_template.go @@ -20,6 +20,8 @@ type templateConverter interface { 
CreateTemplate(*proxmox.VmRef) error } +var _ templateConverter = &proxmox.Client{} + func (s *stepConvertToTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) client := state.Get("proxmoxClient").(templateConverter) diff --git a/builder/proxmox/step_convert_to_template_test.go b/builder/proxmox/step_convert_to_template_test.go index 7df852627..a7f472e3a 100644 --- a/builder/proxmox/step_convert_to_template_test.go +++ b/builder/proxmox/step_convert_to_template_test.go @@ -22,6 +22,8 @@ func (m converterMock) CreateTemplate(r *proxmox.VmRef) error { return m.createTemplate(r) } +var _ templateConverter = converterMock{} + func TestConvertToTemplate(t *testing.T) { cs := []struct { name string diff --git a/builder/proxmox/step_finalize_template_config.go b/builder/proxmox/step_finalize_template_config.go index c6de188ef..df90fc9f8 100644 --- a/builder/proxmox/step_finalize_template_config.go +++ b/builder/proxmox/step_finalize_template_config.go @@ -17,9 +17,11 @@ type stepFinalizeTemplateConfig struct{} type templateFinalizer interface { GetVmConfig(*proxmox.VmRef) (map[string]interface{}, error) - SetVmConfig(*proxmox.VmRef, map[string]interface{}) (string, error) + SetVmConfig(*proxmox.VmRef, map[string]interface{}) (interface{}, error) } +var _ templateFinalizer = &proxmox.Client{} + func (s *stepFinalizeTemplateConfig) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) client := state.Get("proxmoxClient").(templateFinalizer) diff --git a/builder/proxmox/step_finalize_template_config_test.go b/builder/proxmox/step_finalize_template_config_test.go index 58de6c966..501dddef2 100644 --- a/builder/proxmox/step_finalize_template_config_test.go +++ b/builder/proxmox/step_finalize_template_config_test.go @@ -18,10 +18,12 @@ type finalizerMock struct { func (m finalizerMock) GetVmConfig(*proxmox.VmRef) (map[string]interface{}, error) { return 
m.getConfig() } -func (m finalizerMock) SetVmConfig(vmref *proxmox.VmRef, c map[string]interface{}) (string, error) { +func (m finalizerMock) SetVmConfig(vmref *proxmox.VmRef, c map[string]interface{}) (interface{}, error) { return m.setConfig(c) } +var _ templateFinalizer = finalizerMock{} + func TestTemplateFinalize(t *testing.T) { cs := []struct { name string diff --git a/builder/proxmox/step_start_vm.go b/builder/proxmox/step_start_vm.go index c65b8c968..779394023 100644 --- a/builder/proxmox/step_start_vm.go +++ b/builder/proxmox/step_start_vm.go @@ -114,6 +114,8 @@ type startedVMCleaner interface { DeleteVm(*proxmox.VmRef) (string, error) } +var _ startedVMCleaner = &proxmox.Client{} + func (s *stepStartVM) Cleanup(state multistep.StateBag) { vmRefUntyped, ok := state.GetOk("vmRef") // If not ok, we probably errored out before creating the VM diff --git a/builder/proxmox/step_start_vm_test.go b/builder/proxmox/step_start_vm_test.go index 70f0aa37b..cb19670f5 100644 --- a/builder/proxmox/step_start_vm_test.go +++ b/builder/proxmox/step_start_vm_test.go @@ -14,8 +14,6 @@ type startedVMCleanerMock struct { deleteVm func() (string, error) } -var _ startedVMCleaner = &startedVMCleanerMock{} - func (m startedVMCleanerMock) StopVm(*proxmox.VmRef) (string, error) { return m.stopVm() } @@ -23,6 +21,8 @@ func (m startedVMCleanerMock) DeleteVm(*proxmox.VmRef) (string, error) { return m.deleteVm() } +var _ startedVMCleaner = &startedVMCleanerMock{} + func TestCleanupStartVM(t *testing.T) { cs := []struct { name string diff --git a/builder/proxmox/step_type_boot_command.go b/builder/proxmox/step_type_boot_command.go index 1967b09ca..2e87cb7e5 100644 --- a/builder/proxmox/step_type_boot_command.go +++ b/builder/proxmox/step_type_boot_command.go @@ -32,6 +32,8 @@ type commandTyper interface { MonitorCmd(*proxmox.VmRef, string) (map[string]interface{}, error) } +var _ commandTyper = &proxmox.Client{} + func (s *stepTypeBootCommand) Run(ctx context.Context, state 
multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) c := state.Get("config").(*Config) diff --git a/builder/proxmox/step_type_boot_command_test.go b/builder/proxmox/step_type_boot_command_test.go index efe8b4c4c..b2f285c04 100644 --- a/builder/proxmox/step_type_boot_command_test.go +++ b/builder/proxmox/step_type_boot_command_test.go @@ -20,6 +20,8 @@ func (m commandTyperMock) MonitorCmd(ref *proxmox.VmRef, cmd string) (map[string return m.monitorCmd(ref, cmd) } +var _ commandTyper = commandTyperMock{} + func TestTypeBootCommand(t *testing.T) { cs := []struct { name string From 65f38978f8e12421adea8f6a09eeac2b9fce3f94 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Sat, 16 Mar 2019 17:19:17 +0100 Subject: [PATCH 39/47] Add vendor --- .../github.com/Telmate/proxmox-api-go/LICENSE | 21 + .../Telmate/proxmox-api-go/proxmox/client.go | 597 +++++++++++++++ .../proxmox-api-go/proxmox/config_qemu.go | 706 ++++++++++++++++++ .../Telmate/proxmox-api-go/proxmox/session.go | 319 ++++++++ .../Telmate/proxmox-api-go/proxmox/util.go | 62 ++ vendor/vendor.json | 6 + 6 files changed, 1711 insertions(+) create mode 100644 vendor/github.com/Telmate/proxmox-api-go/LICENSE create mode 100644 vendor/github.com/Telmate/proxmox-api-go/proxmox/client.go create mode 100644 vendor/github.com/Telmate/proxmox-api-go/proxmox/config_qemu.go create mode 100644 vendor/github.com/Telmate/proxmox-api-go/proxmox/session.go create mode 100644 vendor/github.com/Telmate/proxmox-api-go/proxmox/util.go diff --git a/vendor/github.com/Telmate/proxmox-api-go/LICENSE b/vendor/github.com/Telmate/proxmox-api-go/LICENSE new file mode 100644 index 000000000..57395f1b0 --- /dev/null +++ b/vendor/github.com/Telmate/proxmox-api-go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Telmate/proxmox-api-go/proxmox/client.go b/vendor/github.com/Telmate/proxmox-api-go/proxmox/client.go new file mode 100644 index 000000000..7b97b9505 --- /dev/null +++ b/vendor/github.com/Telmate/proxmox-api-go/proxmox/client.go @@ -0,0 +1,597 @@ +package proxmox + +// inspired by https://github.com/Telmate/vagrant-proxmox/blob/master/lib/vagrant-proxmox/proxmox/connection.rb + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "regexp" + "strconv" + "strings" + "time" +) + +// TaskTimeout - default async task call timeout in seconds +const TaskTimeout = 300 + +// TaskStatusCheckInterval - time between async checks in seconds +const TaskStatusCheckInterval = 2 + +const exitStatusSuccess = "OK" + +// Client - URL, user and password to specifc Proxmox node +type Client struct { + session *Session + ApiUrl string + Username string + Password string +} + +// VmRef - virtual machine ref parts +// map[type:qemu node:proxmox1-xx id:qemu/132 diskread:5.57424738e+08 disk:0 netin:5.9297450593e+10 mem:3.3235968e+09 
uptime:1.4567097e+07 vmid:132 template:0 maxcpu:2 netout:6.053310416e+09 maxdisk:3.4359738368e+10 maxmem:8.592031744e+09 diskwrite:1.49663619584e+12 status:running cpu:0.00386980694947209 name:appt-app1-dev.xxx.xx] +type VmRef struct { + vmId int + node string + vmType string +} + +func (vmr *VmRef) SetNode(node string) { + vmr.node = node + return +} + +func (vmr *VmRef) SetVmType(vmType string) { + vmr.vmType = vmType + return +} + +func (vmr *VmRef) VmId() int { + return vmr.vmId +} + +func (vmr *VmRef) Node() string { + return vmr.node +} + +func NewVmRef(vmId int) (vmr *VmRef) { + vmr = &VmRef{vmId: vmId, node: "", vmType: ""} + return +} + +func NewClient(apiUrl string, hclient *http.Client, tls *tls.Config) (client *Client, err error) { + var sess *Session + sess, err = NewSession(apiUrl, hclient, tls) + if err == nil { + client = &Client{session: sess, ApiUrl: apiUrl} + } + return client, err +} + +func (c *Client) Login(username string, password string) (err error) { + c.Username = username + c.Password = password + return c.session.Login(username, password) +} + +func (c *Client) GetJsonRetryable(url string, data *map[string]interface{}, tries int) error { + var statErr error + for ii := 0; ii < tries; ii++ { + _, statErr = c.session.GetJSON(url, nil, nil, data) + if statErr == nil { + return nil + } + // if statErr != io.ErrUnexpectedEOF { // don't give up on ErrUnexpectedEOF + // return statErr + // } + time.Sleep(5 * time.Second) + } + return statErr +} + +func (c *Client) GetNodeList() (list map[string]interface{}, err error) { + err = c.GetJsonRetryable("/nodes", &list, 3) + return +} + +func (c *Client) GetVmList() (list map[string]interface{}, err error) { + err = c.GetJsonRetryable("/cluster/resources?type=vm", &list, 3) + return +} + +func (c *Client) CheckVmRef(vmr *VmRef) (err error) { + if vmr.node == "" || vmr.vmType == "" { + _, err = c.GetVmInfo(vmr) + } + return +} + +func (c *Client) GetVmInfo(vmr *VmRef) (vmInfo map[string]interface{}, 
err error) { + resp, err := c.GetVmList() + vms := resp["data"].([]interface{}) + for vmii := range vms { + vm := vms[vmii].(map[string]interface{}) + if int(vm["vmid"].(float64)) == vmr.vmId { + vmInfo = vm + vmr.node = vmInfo["node"].(string) + vmr.vmType = vmInfo["type"].(string) + return + } + } + return nil, errors.New(fmt.Sprintf("Vm '%d' not found", vmr.vmId)) +} + +func (c *Client) GetVmRefByName(vmName string) (vmr *VmRef, err error) { + resp, err := c.GetVmList() + vms := resp["data"].([]interface{}) + for vmii := range vms { + vm := vms[vmii].(map[string]interface{}) + if vm["name"] != nil && vm["name"].(string) == vmName { + vmr = NewVmRef(int(vm["vmid"].(float64))) + vmr.node = vm["node"].(string) + vmr.vmType = vm["type"].(string) + return + } + } + return nil, errors.New(fmt.Sprintf("Vm '%s' not found", vmName)) +} + +func (c *Client) GetVmState(vmr *VmRef) (vmState map[string]interface{}, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return nil, err + } + var data map[string]interface{} + url := fmt.Sprintf("/nodes/%s/%s/%d/status/current", vmr.node, vmr.vmType, vmr.vmId) + err = c.GetJsonRetryable(url, &data, 3) + if err != nil { + return nil, err + } + if data["data"] == nil { + return nil, errors.New("Vm STATE not readable") + } + vmState = data["data"].(map[string]interface{}) + return +} + +func (c *Client) GetVmConfig(vmr *VmRef) (vmConfig map[string]interface{}, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return nil, err + } + var data map[string]interface{} + url := fmt.Sprintf("/nodes/%s/%s/%d/config", vmr.node, vmr.vmType, vmr.vmId) + err = c.GetJsonRetryable(url, &data, 3) + if err != nil { + return nil, err + } + if data["data"] == nil { + return nil, errors.New("Vm CONFIG not readable") + } + vmConfig = data["data"].(map[string]interface{}) + return +} + +func (c *Client) GetVmSpiceProxy(vmr *VmRef) (vmSpiceProxy map[string]interface{}, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return nil, 
err + } + var data map[string]interface{} + url := fmt.Sprintf("/nodes/%s/%s/%d/spiceproxy", vmr.node, vmr.vmType, vmr.vmId) + _, err = c.session.PostJSON(url, nil, nil, nil, &data) + if err != nil { + return nil, err + } + if data["data"] == nil { + return nil, errors.New("Vm SpiceProxy not readable") + } + vmSpiceProxy = data["data"].(map[string]interface{}) + return +} + +type AgentNetworkInterface struct { + MACAddress string + IPAddresses []net.IP + Name string + Statistics map[string]int64 +} + +func (a *AgentNetworkInterface) UnmarshalJSON(b []byte) error { + var intermediate struct { + HardwareAddress string `json:"hardware-address"` + IPAddresses []struct { + IPAddress string `json:"ip-address"` + IPAddressType string `json:"ip-address-type"` + Prefix int `json:"prefix"` + } `json:"ip-addresses"` + Name string `json:"name"` + Statistics map[string]int64 `json:"statistics"` + } + err := json.Unmarshal(b, &intermediate) + if err != nil { + return err + } + + a.IPAddresses = make([]net.IP, len(intermediate.IPAddresses)) + for idx, ip := range intermediate.IPAddresses { + a.IPAddresses[idx] = net.ParseIP(ip.IPAddress) + if a.IPAddresses[idx] == nil { + return fmt.Errorf("Could not parse %s as IP", ip.IPAddress) + } + } + a.MACAddress = intermediate.HardwareAddress + a.Name = intermediate.Name + a.Statistics = intermediate.Statistics + return nil +} + +func (c *Client) GetVmAgentNetworkInterfaces(vmr *VmRef) ([]AgentNetworkInterface, error) { + var ifs []AgentNetworkInterface + err := c.doAgentGet(vmr, "network-get-interfaces", &ifs) + return ifs, err +} + +func (c *Client) doAgentGet(vmr *VmRef, command string, output interface{}) error { + err := c.CheckVmRef(vmr) + if err != nil { + return err + } + + url := fmt.Sprintf("/nodes/%s/%s/%d/agent/%s", vmr.node, vmr.vmType, vmr.vmId, command) + resp, err := c.session.Get(url, nil, nil) + if err != nil { + return err + } + + return TypedResponse(resp, output) +} + +func (c *Client) CreateTemplate(vmr *VmRef) error 
{ + err := c.CheckVmRef(vmr) + if err != nil { + return err + } + + url := fmt.Sprintf("/nodes/%s/%s/%d/template", vmr.node, vmr.vmType, vmr.vmId) + _, err = c.session.Post(url, nil, nil, nil) + if err != nil { + return err + } + + return nil +} + +func (c *Client) MonitorCmd(vmr *VmRef, command string) (monitorRes map[string]interface{}, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return nil, err + } + reqbody := ParamsToBody(map[string]interface{}{"command": command}) + url := fmt.Sprintf("/nodes/%s/%s/%d/monitor", vmr.node, vmr.vmType, vmr.vmId) + resp, err := c.session.Post(url, nil, nil, &reqbody) + monitorRes, err = ResponseJSON(resp) + return +} + +// WaitForCompletion - poll the API for task completion +func (c *Client) WaitForCompletion(taskResponse map[string]interface{}) (waitExitStatus string, err error) { + if taskResponse["errors"] != nil { + errJSON, _ := json.MarshalIndent(taskResponse["errors"], "", " ") + return string(errJSON), errors.New("Error reponse") + } + if taskResponse["data"] == nil { + return "", nil + } + waited := 0 + taskUpid := taskResponse["data"].(string) + for waited < TaskTimeout { + exitStatus, statErr := c.GetTaskExitstatus(taskUpid) + if statErr != nil { + if statErr != io.ErrUnexpectedEOF { // don't give up on ErrUnexpectedEOF + return "", statErr + } + } + if exitStatus != nil { + waitExitStatus = exitStatus.(string) + return + } + time.Sleep(TaskStatusCheckInterval * time.Second) + waited = waited + TaskStatusCheckInterval + } + return "", errors.New("Wait timeout for:" + taskUpid) +} + +var rxTaskNode = regexp.MustCompile("UPID:(.*?):") + +func (c *Client) GetTaskExitstatus(taskUpid string) (exitStatus interface{}, err error) { + node := rxTaskNode.FindStringSubmatch(taskUpid)[1] + url := fmt.Sprintf("/nodes/%s/tasks/%s/status", node, taskUpid) + var data map[string]interface{} + _, err = c.session.GetJSON(url, nil, nil, &data) + if err == nil { + exitStatus = 
data["data"].(map[string]interface{})["exitstatus"] + } + if exitStatus != nil && exitStatus != exitStatusSuccess { + err = errors.New(exitStatus.(string)) + } + return +} + +func (c *Client) StatusChangeVm(vmr *VmRef, setStatus string) (exitStatus string, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return "", err + } + + url := fmt.Sprintf("/nodes/%s/%s/%d/status/%s", vmr.node, vmr.vmType, vmr.vmId, setStatus) + var taskResponse map[string]interface{} + for i := 0; i < 3; i++ { + _, err = c.session.PostJSON(url, nil, nil, nil, &taskResponse) + exitStatus, err = c.WaitForCompletion(taskResponse) + if exitStatus == "" { + time.Sleep(TaskStatusCheckInterval * time.Second) + } else { + return + } + } + return +} + +func (c *Client) StartVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "start") +} + +func (c *Client) StopVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "stop") +} + +func (c *Client) ShutdownVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "shutdown") +} + +func (c *Client) ResetVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "reset") +} + +func (c *Client) SuspendVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "suspend") +} + +func (c *Client) ResumeVm(vmr *VmRef) (exitStatus string, err error) { + return c.StatusChangeVm(vmr, "resume") +} + +func (c *Client) DeleteVm(vmr *VmRef) (exitStatus string, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return "", err + } + url := fmt.Sprintf("/nodes/%s/%s/%d", vmr.node, vmr.vmType, vmr.vmId) + var taskResponse map[string]interface{} + _, err = c.session.RequestJSON("DELETE", url, nil, nil, nil, &taskResponse) + exitStatus, err = c.WaitForCompletion(taskResponse) + return +} + +func (c *Client) CreateQemuVm(node string, vmParams map[string]interface{}) (exitStatus string, err error) { + // Create VM disks first to ensure disks names. 
+ createdDisks, createdDisksErr := c.createVMDisks(node, vmParams) + if createdDisksErr != nil { + return "", createdDisksErr + } + + // Then create the VM itself. + reqbody := ParamsToBody(vmParams) + url := fmt.Sprintf("/nodes/%s/qemu", node) + var resp *http.Response + resp, err = c.session.Post(url, nil, nil, &reqbody) + defer resp.Body.Close() + if err != nil { + b, _ := ioutil.ReadAll(resp.Body) + exitStatus = string(b) + return + } + + taskResponse, err := ResponseJSON(resp) + if err != nil { + return + } + exitStatus, err = c.WaitForCompletion(taskResponse) + // Delete VM disks if the VM didn't create. + if exitStatus != "OK" { + deleteDisksErr := c.DeleteVMDisks(node, createdDisks) + if deleteDisksErr != nil { + return "", deleteDisksErr + } + } + + return +} + +func (c *Client) CloneQemuVm(vmr *VmRef, vmParams map[string]interface{}) (exitStatus string, err error) { + reqbody := ParamsToBody(vmParams) + url := fmt.Sprintf("/nodes/%s/qemu/%d/clone", vmr.node, vmr.vmId) + resp, err := c.session.Post(url, nil, nil, &reqbody) + if err == nil { + taskResponse, err := ResponseJSON(resp) + if err != nil { + return "", err + } + exitStatus, err = c.WaitForCompletion(taskResponse) + } + return +} + +func (c *Client) RollbackQemuVm(vmr *VmRef, snapshot string) (exitStatus string, err error) { + err = c.CheckVmRef(vmr) + if err != nil { + return "", err + } + url := fmt.Sprintf("/nodes/%s/%s/%d/snapshot/%s/rollback", vmr.node, vmr.vmType, vmr.vmId, snapshot) + var taskResponse map[string]interface{} + _, err = c.session.PostJSON(url, nil, nil, nil, &taskResponse) + exitStatus, err = c.WaitForCompletion(taskResponse) + return +} + +// SetVmConfig - send config options +func (c *Client) SetVmConfig(vmr *VmRef, vmParams map[string]interface{}) (exitStatus interface{}, err error) { + reqbody := ParamsToBody(vmParams) + url := fmt.Sprintf("/nodes/%s/%s/%d/config", vmr.node, vmr.vmType, vmr.vmId) + resp, err := c.session.Post(url, nil, nil, &reqbody) + if err == nil { + 
taskResponse, err := ResponseJSON(resp) + if err != nil { + return nil, err + } + exitStatus, err = c.WaitForCompletion(taskResponse) + } + return +} + +func (c *Client) ResizeQemuDisk(vmr *VmRef, disk string, moreSizeGB int) (exitStatus interface{}, err error) { + // PUT + //disk:virtio0 + //size:+2G + if disk == "" { + disk = "virtio0" + } + size := fmt.Sprintf("+%dG", moreSizeGB) + reqbody := ParamsToBody(map[string]interface{}{"disk": disk, "size": size}) + url := fmt.Sprintf("/nodes/%s/%s/%d/resize", vmr.node, vmr.vmType, vmr.vmId) + resp, err := c.session.Put(url, nil, nil, &reqbody) + if err == nil { + taskResponse, err := ResponseJSON(resp) + if err != nil { + return nil, err + } + exitStatus, err = c.WaitForCompletion(taskResponse) + } + return +} + +// GetNextID - Get next free VMID +func (c *Client) GetNextID(currentID int) (nextID int, err error) { + var data map[string]interface{} + var url string + if currentID >= 100 { + url = fmt.Sprintf("/cluster/nextid?vmid=%d", currentID) + } else { + url = "/cluster/nextid" + } + _, err = c.session.GetJSON(url, nil, nil, &data) + if err == nil { + if data["errors"] != nil { + if currentID >= 100 { + return c.GetNextID(currentID + 1) + } else { + return -1, errors.New("error using /cluster/nextid") + } + } + nextID, err = strconv.Atoi(data["data"].(string)) + } + return +} + +// CreateVMDisk - Create single disk for VM on host node. 
+func (c *Client) CreateVMDisk( + nodeName string, + storageName string, + fullDiskName string, + diskParams map[string]interface{}, +) error { + + reqbody := ParamsToBody(diskParams) + url := fmt.Sprintf("/nodes/%s/storage/%s/content", nodeName, storageName) + resp, err := c.session.Post(url, nil, nil, &reqbody) + if err == nil { + taskResponse, err := ResponseJSON(resp) + if err != nil { + return err + } + if diskName, containsData := taskResponse["data"]; !containsData || diskName != fullDiskName { + return errors.New(fmt.Sprintf("Cannot create VM disk %s", fullDiskName)) + } + } else { + return err + } + + return nil +} + +// createVMDisks - Make disks parameters and create all VM disks on host node. +func (c *Client) createVMDisks( + node string, + vmParams map[string]interface{}, +) (disks []string, err error) { + var createdDisks []string + vmID := vmParams["vmid"].(int) + for deviceName, deviceConf := range vmParams { + rxStorageModels := `(ide|sata|scsi|virtio)\d+` + if matched, _ := regexp.MatchString(rxStorageModels, deviceName); matched { + deviceConfMap := ParseConf(deviceConf.(string), ",", "=") + // This if condition to differentiate between `disk` and `cdrom`. + if media, containsFile := deviceConfMap["media"]; containsFile && media == "disk" { + fullDiskName := deviceConfMap["file"].(string) + storageName, volumeName := getStorageAndVolumeName(fullDiskName, ":") + diskParams := map[string]interface{}{ + "vmid": vmID, + "filename": volumeName, + "size": deviceConfMap["size"], + } + err := c.CreateVMDisk(node, storageName, fullDiskName, diskParams) + if err != nil { + return createdDisks, err + } else { + createdDisks = append(createdDisks, fullDiskName) + } + } + } + } + + return createdDisks, nil +} + +// DeleteVMDisks - Delete VM disks from host node. +// By default the VM disks are deteled when the VM is deleted, +// so mainly this is used to delete the disks in case VM creation didn't complete. 
+func (c *Client) DeleteVMDisks( + node string, + disks []string, +) error { + for _, fullDiskName := range disks { + storageName, volumeName := getStorageAndVolumeName(fullDiskName, ":") + url := fmt.Sprintf("/nodes/%s/storage/%s/content/%s", node, storageName, volumeName) + _, err := c.session.Post(url, nil, nil, nil) + if err != nil { + return err + } + } + + return nil +} + +// getStorageAndVolumeName - Extract disk storage and disk volume, since disk name is saved +// in Proxmox with its storage. +func getStorageAndVolumeName( + fullDiskName string, + separator string, +) (storageName string, diskName string) { + storageAndVolumeName := strings.Split(fullDiskName, separator) + storageName, volumeName := storageAndVolumeName[0], storageAndVolumeName[1] + + // when disk type is dir, volumeName is `file=local:100/vm-100-disk-0.raw` + re := regexp.MustCompile(`\d+/(?P\S+.\S+)`) + match := re.FindStringSubmatch(volumeName) + if len(match) == 2 { + volumeName = match[1] + } + + return storageName, volumeName +} diff --git a/vendor/github.com/Telmate/proxmox-api-go/proxmox/config_qemu.go b/vendor/github.com/Telmate/proxmox-api-go/proxmox/config_qemu.go new file mode 100644 index 000000000..7c0ffd897 --- /dev/null +++ b/vendor/github.com/Telmate/proxmox-api-go/proxmox/config_qemu.go @@ -0,0 +1,706 @@ +package proxmox + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +type ( + QemuDevices map[int]map[string]interface{} + QemuDevice map[string]interface{} + QemuDeviceParam []string +) + +// ConfigQemu - Proxmox API QEMU options +type ConfigQemu struct { + Name string `json:"name"` + Description string `json:"desc"` + Onboot bool `json:"onboot"` + Agent string `json:"agent"` + Memory int `json:"memory"` + QemuOs string `json:"os"` + QemuCores int `json:"cores"` + QemuSockets int `json:"sockets"` + QemuIso string `json:"iso"` + FullClone *int `json:"fullclone"` + QemuDisks 
QemuDevices `json:"disk"` + QemuNetworks QemuDevices `json:"network"` + + // Deprecated single disk. + DiskSize float64 `json:"diskGB"` + Storage string `json:"storage"` + StorageType string `json:"storageType"` // virtio|scsi (cloud-init defaults to scsi) + + // Deprecated single nic. + QemuNicModel string `json:"nic"` + QemuBrige string `json:"bridge"` + QemuVlanTag int `json:"vlan"` + QemuMacAddr string `json:"mac"` + + // cloud-init options + CIuser string `json:"ciuser"` + CIpassword string `json:"cipassword"` + + Searchdomain string `json:"searchdomain"` + Nameserver string `json:"nameserver"` + Sshkeys string `json:"sshkeys"` + + // arrays are hard, support 2 interfaces for now + Ipconfig0 string `json:"ipconfig0"` + Ipconfig1 string `json:"ipconfig1"` +} + +// CreateVm - Tell Proxmox API to make the VM +func (config ConfigQemu) CreateVm(vmr *VmRef, client *Client) (err error) { + if config.HasCloudInit() { + return errors.New("Cloud-init parameters only supported on clones or updates") + } + vmr.SetVmType("qemu") + + params := map[string]interface{}{ + "vmid": vmr.vmId, + "name": config.Name, + "onboot": config.Onboot, + "agent": config.Agent, + "ide2": config.QemuIso + ",media=cdrom", + "ostype": config.QemuOs, + "sockets": config.QemuSockets, + "cores": config.QemuCores, + "cpu": "host", + "memory": config.Memory, + "description": config.Description, + } + + // Create disks config. + config.CreateQemuDisksParams(vmr.vmId, params, false) + + // Create networks config. + config.CreateQemuNetworksParams(vmr.vmId, params) + + exitStatus, err := client.CreateQemuVm(vmr.node, params) + if err != nil { + return fmt.Errorf("Error creating VM: %v, error status: %s (params: %v)", err, exitStatus, params) + } + return +} + +// HasCloudInit - are there cloud-init options? 
+func (config ConfigQemu) HasCloudInit() bool { + return config.CIuser != "" || + config.CIpassword != "" || + config.Searchdomain != "" || + config.Nameserver != "" || + config.Sshkeys != "" || + config.Ipconfig0 != "" || + config.Ipconfig1 != "" +} + +/* + +CloneVm +Example: Request + +nodes/proxmox1-xx/qemu/1012/clone + +newid:145 +name:tf-clone1 +target:proxmox1-xx +full:1 +storage:xxx + +*/ +func (config ConfigQemu) CloneVm(sourceVmr *VmRef, vmr *VmRef, client *Client) (err error) { + vmr.SetVmType("qemu") + fullclone := "1" + if config.FullClone != nil { + fullclone = strconv.Itoa(*config.FullClone) + } + storage := config.Storage + if disk0Storage, ok := config.QemuDisks[0]["storage"].(string); ok && len(disk0Storage) > 0 { + storage = disk0Storage + } + params := map[string]interface{}{ + "newid": vmr.vmId, + "target": vmr.node, + "name": config.Name, + "storage": storage, + "full": fullclone, + } + _, err = client.CloneQemuVm(sourceVmr, params) + if err != nil { + return + } + return config.UpdateConfig(vmr, client) +} + +func (config ConfigQemu) UpdateConfig(vmr *VmRef, client *Client) (err error) { + configParams := map[string]interface{}{ + "name": config.Name, + "description": config.Description, + "onboot": config.Onboot, + "agent": config.Agent, + "sockets": config.QemuSockets, + "cores": config.QemuCores, + "memory": config.Memory, + } + + // Create disks config. + config.CreateQemuDisksParams(vmr.vmId, configParams, true) + + // Create networks config. 
+ config.CreateQemuNetworksParams(vmr.vmId, configParams) + + // cloud-init options + if config.CIuser != "" { + configParams["ciuser"] = config.CIuser + } + if config.CIpassword != "" { + configParams["cipassword"] = config.CIpassword + } + if config.Searchdomain != "" { + configParams["searchdomain"] = config.Searchdomain + } + if config.Nameserver != "" { + configParams["nameserver"] = config.Nameserver + } + if config.Sshkeys != "" { + sshkeyEnc := url.PathEscape(config.Sshkeys + "\n") + sshkeyEnc = strings.Replace(sshkeyEnc, "+", "%2B", -1) + sshkeyEnc = strings.Replace(sshkeyEnc, "@", "%40", -1) + sshkeyEnc = strings.Replace(sshkeyEnc, "=", "%3D", -1) + configParams["sshkeys"] = sshkeyEnc + } + if config.Ipconfig0 != "" { + configParams["ipconfig0"] = config.Ipconfig0 + } + if config.Ipconfig1 != "" { + configParams["ipconfig1"] = config.Ipconfig1 + } + _, err = client.SetVmConfig(vmr, configParams) + return err +} + +func NewConfigQemuFromJson(io io.Reader) (config *ConfigQemu, err error) { + config = &ConfigQemu{QemuVlanTag: -1} + err = json.NewDecoder(io).Decode(config) + if err != nil { + log.Fatal(err) + return nil, err + } + log.Println(config) + return +} + +var ( + rxIso = regexp.MustCompile(`(.*?),media`) + rxDeviceID = regexp.MustCompile(`\d+`) + rxDiskName = regexp.MustCompile(`(virtio|scsi)\d+`) + rxDiskType = regexp.MustCompile(`\D+`) + rxNicName = regexp.MustCompile(`net\d+`) +) + +func NewConfigQemuFromApi(vmr *VmRef, client *Client) (config *ConfigQemu, err error) { + var vmConfig map[string]interface{} + for ii := 0; ii < 3; ii++ { + vmConfig, err = client.GetVmConfig(vmr) + if err != nil { + log.Fatal(err) + return nil, err + } + // this can happen: + // {"data":{"lock":"clone","digest":"eb54fb9d9f120ba0c3bdf694f73b10002c375c38","description":" qmclone temporary file\n"}}) + if vmConfig["lock"] == nil { + break + } else { + time.Sleep(8 * time.Second) + } + } + + if vmConfig["lock"] != nil { + return nil, errors.New("vm locked, could not 
obtain config") + } + + // vmConfig Sample: map[ cpu:host + // net0:virtio=62:DF:XX:XX:XX:XX,bridge=vmbr0 + // ide2:local:iso/xxx-xx.iso,media=cdrom memory:2048 + // smbios1:uuid=8b3bf833-aad8-4545-xxx-xxxxxxx digest:aa6ce5xxxxx1b9ce33e4aaeff564d4 sockets:1 + // name:terraform-ubuntu1404-template bootdisk:virtio0 + // virtio0:ProxmoxxxxISCSI:vm-1014-disk-2,size=4G + // description:Base image + // cores:2 ostype:l26 + + name := "" + if _, isSet := vmConfig["name"]; isSet { + name = vmConfig["name"].(string) + } + description := "" + if _, isSet := vmConfig["description"]; isSet { + description = vmConfig["description"].(string) + } + onboot := true + if _, isSet := vmConfig["onboot"]; isSet { + onboot = Itob(int(vmConfig["onboot"].(float64))) + } + agent := "1" + if _, isSet := vmConfig["agent"]; isSet { + agent = vmConfig["agent"].(string) + } + ostype := "other" + if _, isSet := vmConfig["ostype"]; isSet { + ostype = vmConfig["ostype"].(string) + } + memory := 0.0 + if _, isSet := vmConfig["memory"]; isSet { + memory = vmConfig["memory"].(float64) + } + cores := 1.0 + if _, isSet := vmConfig["cores"]; isSet { + cores = vmConfig["cores"].(float64) + } + sockets := 1.0 + if _, isSet := vmConfig["sockets"]; isSet { + sockets = vmConfig["sockets"].(float64) + } + config = &ConfigQemu{ + Name: name, + Description: strings.TrimSpace(description), + Onboot: onboot, + Agent: agent, + QemuOs: ostype, + Memory: int(memory), + QemuCores: int(cores), + QemuSockets: int(sockets), + QemuVlanTag: -1, + QemuDisks: QemuDevices{}, + QemuNetworks: QemuDevices{}, + } + + if vmConfig["ide2"] != nil { + isoMatch := rxIso.FindStringSubmatch(vmConfig["ide2"].(string)) + config.QemuIso = isoMatch[1] + } + + if _, isSet := vmConfig["ciuser"]; isSet { + config.CIuser = vmConfig["ciuser"].(string) + } + if _, isSet := vmConfig["cipassword"]; isSet { + config.CIpassword = vmConfig["cipassword"].(string) + } + if _, isSet := vmConfig["searchdomain"]; isSet { + config.Searchdomain = 
vmConfig["searchdomain"].(string) + } + if _, isSet := vmConfig["sshkeys"]; isSet { + config.Sshkeys, _ = url.PathUnescape(vmConfig["sshkeys"].(string)) + } + if _, isSet := vmConfig["ipconfig0"]; isSet { + config.Ipconfig0 = vmConfig["ipconfig0"].(string) + } + if _, isSet := vmConfig["ipconfig1"]; isSet { + config.Ipconfig1 = vmConfig["ipconfig1"].(string) + } + + // Add disks. + diskNames := []string{} + + for k, _ := range vmConfig { + if diskName := rxDiskName.FindStringSubmatch(k); len(diskName) > 0 { + diskNames = append(diskNames, diskName[0]) + } + } + + for _, diskName := range diskNames { + diskConfStr := vmConfig[diskName] + diskConfList := strings.Split(diskConfStr.(string), ",") + + // + id := rxDeviceID.FindStringSubmatch(diskName) + diskID, _ := strconv.Atoi(id[0]) + diskType := rxDiskType.FindStringSubmatch(diskName)[0] + storageName, fileName := ParseSubConf(diskConfList[0], ":") + + // + diskConfMap := QemuDevice{ + "type": diskType, + "storage": storageName, + "file": fileName, + } + + // Add rest of device config. + diskConfMap.readDeviceConfig(diskConfList[1:]) + + // And device config to disks map. + if len(diskConfMap) > 0 { + config.QemuDisks[diskID] = diskConfMap + } + } + + // Add networks. + nicNameRe := regexp.MustCompile(`net\d+`) + nicNames := []string{} + + for k, _ := range vmConfig { + if nicName := nicNameRe.FindStringSubmatch(k); len(nicName) > 0 { + nicNames = append(nicNames, nicName[0]) + } + } + + for _, nicName := range nicNames { + nicConfStr := vmConfig[nicName] + nicConfList := strings.Split(nicConfStr.(string), ",") + + // + id := rxDeviceID.FindStringSubmatch(nicName) + nicID, _ := strconv.Atoi(id[0]) + model, macaddr := ParseSubConf(nicConfList[0], "=") + + // Add model and MAC address. + nicConfMap := QemuDevice{ + "model": model, + "macaddr": macaddr, + } + + // Add rest of device config. + nicConfMap.readDeviceConfig(nicConfList[1:]) + + // And device config to networks. 
+ if len(nicConfMap) > 0 { + config.QemuNetworks[nicID] = nicConfMap + } + } + + return +} + +// Useful waiting for ISO install to complete +func WaitForShutdown(vmr *VmRef, client *Client) (err error) { + for ii := 0; ii < 100; ii++ { + vmState, err := client.GetVmState(vmr) + if err != nil { + log.Print("Wait error:") + log.Println(err) + } else if vmState["status"] == "stopped" { + return nil + } + time.Sleep(5 * time.Second) + } + return errors.New("Not shutdown within wait time") +} + +// This is because proxmox create/config API won't let us make usernet devices +func SshForwardUsernet(vmr *VmRef, client *Client) (sshPort string, err error) { + vmState, err := client.GetVmState(vmr) + if err != nil { + return "", err + } + if vmState["status"] == "stopped" { + return "", errors.New("VM must be running first") + } + sshPort = strconv.Itoa(vmr.VmId() + 22000) + _, err = client.MonitorCmd(vmr, "netdev_add user,id=net1,hostfwd=tcp::"+sshPort+"-:22") + if err != nil { + return "", err + } + _, err = client.MonitorCmd(vmr, "device_add virtio-net-pci,id=net1,netdev=net1,addr=0x13") + if err != nil { + return "", err + } + return +} + +// device_del net1 +// netdev_del net1 +func RemoveSshForwardUsernet(vmr *VmRef, client *Client) (err error) { + vmState, err := client.GetVmState(vmr) + if err != nil { + return err + } + if vmState["status"] == "stopped" { + return errors.New("VM must be running first") + } + _, err = client.MonitorCmd(vmr, "device_del net1") + if err != nil { + return err + } + _, err = client.MonitorCmd(vmr, "netdev_del net1") + if err != nil { + return err + } + return nil +} + +func MaxVmId(client *Client) (max int, err error) { + resp, err := client.GetVmList() + vms := resp["data"].([]interface{}) + max = 0 + for vmii := range vms { + vm := vms[vmii].(map[string]interface{}) + vmid := int(vm["vmid"].(float64)) + if vmid > max { + max = vmid + } + } + return +} + +func SendKeysString(vmr *VmRef, client *Client, keys string) (err error) { + 
vmState, err := client.GetVmState(vmr) + if err != nil { + return err + } + if vmState["status"] == "stopped" { + return errors.New("VM must be running first") + } + for _, r := range keys { + c := string(r) + lower := strings.ToLower(c) + if c != lower { + c = "shift-" + lower + } else { + switch c { + case "!": + c = "shift-1" + case "@": + c = "shift-2" + case "#": + c = "shift-3" + case "$": + c = "shift-4" + case "%%": + c = "shift-5" + case "^": + c = "shift-6" + case "&": + c = "shift-7" + case "*": + c = "shift-8" + case "(": + c = "shift-9" + case ")": + c = "shift-0" + case "_": + c = "shift-minus" + case "+": + c = "shift-equal" + case " ": + c = "spc" + case "/": + c = "slash" + case "\\": + c = "backslash" + case ",": + c = "comma" + case "-": + c = "minus" + case "=": + c = "equal" + case ".": + c = "dot" + case "?": + c = "shift-slash" + } + } + _, err = client.MonitorCmd(vmr, "sendkey "+c) + if err != nil { + return err + } + time.Sleep(100) + } + return nil +} + +// Create parameters for each Nic device. +func (c ConfigQemu) CreateQemuNetworksParams(vmID int, params map[string]interface{}) error { + + // For backward compatibility. + if len(c.QemuNetworks) == 0 && len(c.QemuNicModel) > 0 { + deprecatedStyleMap := QemuDevice{ + "model": c.QemuNicModel, + "bridge": c.QemuBrige, + "macaddr": c.QemuMacAddr, + } + + if c.QemuVlanTag > 0 { + deprecatedStyleMap["tag"] = strconv.Itoa(c.QemuVlanTag) + } + + c.QemuNetworks[0] = deprecatedStyleMap + } + + // For new style with multi net device. + for nicID, nicConfMap := range c.QemuNetworks { + + nicConfParam := QemuDeviceParam{} + + // Set Nic name. + qemuNicName := "net" + strconv.Itoa(nicID) + + // Set Mac address. + if nicConfMap["macaddr"] == nil || nicConfMap["macaddr"].(string) == "" { + // Generate Mac based on VmID and NicID so it will be the same always. 
+ macaddr := make(net.HardwareAddr, 6) + rand.Seed(time.Now().UnixNano()) + rand.Read(macaddr) + macaddr[0] = (macaddr[0] | 2) & 0xfe // fix from github issue #18 + macAddrUppr := strings.ToUpper(fmt.Sprintf("%v", macaddr)) + // use model=mac format for older proxmox compatability + macAddr := fmt.Sprintf("%v=%v", nicConfMap["model"], macAddrUppr) + + // Add Mac to source map so it will be returned. (useful for some use case like Terraform) + nicConfMap["macaddr"] = macAddrUppr + // and also add it to the parameters which will be sent to Proxmox API. + nicConfParam = append(nicConfParam, macAddr) + } else { + macAddr := fmt.Sprintf("%v=%v", nicConfMap["model"], nicConfMap["macaddr"].(string)) + nicConfParam = append(nicConfParam, macAddr) + } + + // Set bridge if not nat. + if nicConfMap["bridge"].(string) != "nat" { + bridge := fmt.Sprintf("bridge=%v", nicConfMap["bridge"]) + nicConfParam = append(nicConfParam, bridge) + } + + // Keys that are not used as real/direct conf. + ignoredKeys := []string{"id", "bridge", "macaddr", "model"} + + // Rest of config. + nicConfParam = nicConfParam.createDeviceParam(nicConfMap, ignoredKeys) + + // Add nic to Qemu prams. + params[qemuNicName] = strings.Join(nicConfParam, ",") + } + + return nil +} + +// Create parameters for each disk. +func (c ConfigQemu) CreateQemuDisksParams( + vmID int, + params map[string]interface{}, + cloned bool, +) error { + + // For backward compatibility. + if len(c.QemuDisks) == 0 && len(c.Storage) > 0 { + + dType := c.StorageType + if dType == "" { + if c.HasCloudInit() { + dType = "scsi" + } else { + dType = "virtio" + } + } + deprecatedStyleMap := QemuDevice{ + "type": dType, + "storage": c.Storage, + "size": c.DiskSize, + "storage_type": "lvm", // default old style + "cache": "none", // default old value + } + + c.QemuDisks[0] = deprecatedStyleMap + } + + // For new style with multi disk device. 
+ for diskID, diskConfMap := range c.QemuDisks { + + // skip the first disk for clones (may not always be right, but a template probably has at least 1 disk) + if diskID == 0 && cloned { + continue + } + diskConfParam := QemuDeviceParam{ + "media=disk", + } + + // Device name. + deviceType := diskConfMap["type"].(string) + qemuDiskName := deviceType + strconv.Itoa(diskID) + + // Set disk storage. + // Disk size. + diskSizeGB := fmt.Sprintf("size=%v", diskConfMap["size"]) + diskConfParam = append(diskConfParam, diskSizeGB) + + // Disk name. + var diskFile string + // Currently ZFS local, LVM, and Directory are considered. + // Other formats are not verified, but could be added if they're needed. + rxStorageTypes := `(zfspool|lvm)` + storageType := diskConfMap["storage_type"].(string) + if matched, _ := regexp.MatchString(rxStorageTypes, storageType); matched { + diskFile = fmt.Sprintf("file=%v:vm-%v-disk-%v", diskConfMap["storage"], vmID, diskID) + } else { + diskFile = fmt.Sprintf("file=%v:%v/vm-%v-disk-%v.%v", diskConfMap["storage"], vmID, vmID, diskID, diskConfMap["format"]) + } + diskConfParam = append(diskConfParam, diskFile) + + // Set cache if not none (default). + if diskConfMap["cache"].(string) != "none" { + diskCache := fmt.Sprintf("cache=%v", diskConfMap["cache"]) + diskConfParam = append(diskConfParam, diskCache) + } + + // Keys that are not used as real/direct conf. + ignoredKeys := []string{"id", "type", "storage", "storage_type", "size", "cache"} + + // Rest of config. + diskConfParam = diskConfParam.createDeviceParam(diskConfMap, ignoredKeys) + + // Add back to Qemu prams. + params[qemuDiskName] = strings.Join(diskConfParam, ",") + } + + return nil +} + +// Create the parameters for each device that will be sent to Proxmox API. 
+func (p QemuDeviceParam) createDeviceParam( + deviceConfMap QemuDevice, + ignoredKeys []string, +) QemuDeviceParam { + + for key, value := range deviceConfMap { + if ignored := inArray(ignoredKeys, key); !ignored { + var confValue interface{} + if bValue, ok := value.(bool); ok && bValue { + confValue = "1" + } else if sValue, ok := value.(string); ok && len(sValue) > 0 { + confValue = sValue + } else if iValue, ok := value.(int); ok && iValue > 0 { + confValue = iValue + } + if confValue != nil { + deviceConf := fmt.Sprintf("%v=%v", key, confValue) + p = append(p, deviceConf) + } + } + } + + return p +} + +// readDeviceConfig - get standard sub-conf strings where `key=value` and update conf map. +func (confMap QemuDevice) readDeviceConfig(confList []string) error { + // Add device config. + for _, conf := range confList { + key, value := ParseSubConf(conf, "=") + confMap[key] = value + } + return nil +} + +func (c ConfigQemu) String() string { + jsConf, _ := json.Marshal(c) + return string(jsConf) +} diff --git a/vendor/github.com/Telmate/proxmox-api-go/proxmox/session.go b/vendor/github.com/Telmate/proxmox-api-go/proxmox/session.go new file mode 100644 index 000000000..72f72eb6a --- /dev/null +++ b/vendor/github.com/Telmate/proxmox-api-go/proxmox/session.go @@ -0,0 +1,319 @@ +package proxmox + +// inspired by https://github.com/openstack/golang-client/blob/master/openstack/session.go + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "net/url" +) + +var Debug = new(bool) + +type Response struct { + Resp *http.Response + Body []byte +} + +type Session struct { + httpClient *http.Client + ApiUrl string + AuthTicket string + CsrfToken string + Headers http.Header +} + +func NewSession(apiUrl string, hclient *http.Client, tls *tls.Config) (session *Session, err error) { + if hclient == nil { + // Only build a transport if we're also building the client + tr := &http.Transport{ + 
TLSClientConfig: tls, + DisableCompression: true, + } + hclient = &http.Client{Transport: tr} + } + session = &Session{ + httpClient: hclient, + ApiUrl: apiUrl, + AuthTicket: "", + CsrfToken: "", + Headers: http.Header{}, + } + return session, nil +} + +func ParamsToBody(params map[string]interface{}) (body []byte) { + vals := url.Values{} + for k, intrV := range params { + var v string + switch intrV.(type) { + // Convert true/false bool to 1/0 string where Proxmox API can understand it. + case bool: + if intrV.(bool) { + v = "1" + } else { + v = "0" + } + default: + v = fmt.Sprintf("%v", intrV) + } + vals.Set(k, v) + } + body = bytes.NewBufferString(vals.Encode()).Bytes() + return +} + +func decodeResponse(resp *http.Response, v interface{}) error { + if resp.Body == nil { + return nil + } + rbody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error reading response body: %s", err) + } + if err = json.Unmarshal(rbody, &v); err != nil { + return err + } + return nil +} + +func ResponseJSON(resp *http.Response) (jbody map[string]interface{}, err error) { + err = decodeResponse(resp, &jbody) + return jbody, err +} + +func TypedResponse(resp *http.Response, v interface{}) error { + var intermediate struct { + Data struct { + Result json.RawMessage `json:"result"` + } `json:"data"` + } + err := decodeResponse(resp, &intermediate) + if err != nil { + return fmt.Errorf("error reading response envelope: %v", err) + } + if err = json.Unmarshal(intermediate.Data.Result, v); err != nil { + return fmt.Errorf("error unmarshalling result %v", err) + } + return nil +} + +func (s *Session) Login(username string, password string) (err error) { + reqbody := ParamsToBody(map[string]interface{}{"username": username, "password": password}) + olddebug := *Debug + *Debug = false // don't share passwords in debug log + resp, err := s.Post("/access/ticket", nil, nil, &reqbody) + *Debug = olddebug + if err != nil { + return err + } + if resp == nil { + return 
errors.New("Login error reading response") + } + dr, _ := httputil.DumpResponse(resp, true) + jbody, err := ResponseJSON(resp) + if err != nil { + return err + } + if jbody == nil || jbody["data"] == nil { + return fmt.Errorf("Invalid login response:\n-----\n%s\n-----", dr) + } + dat := jbody["data"].(map[string]interface{}) + s.AuthTicket = dat["ticket"].(string) + s.CsrfToken = dat["CSRFPreventionToken"].(string) + return nil +} + +func (s *Session) NewRequest(method, url string, headers *http.Header, body io.Reader) (req *http.Request, err error) { + req, err = http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + if headers != nil { + req.Header = *headers + } + if s.AuthTicket != "" { + req.Header.Add("Cookie", "PVEAuthCookie="+s.AuthTicket) + req.Header.Add("CSRFPreventionToken", s.CsrfToken) + } + return +} + +func (s *Session) Do(req *http.Request) (*http.Response, error) { + // Add session headers + for k := range s.Headers { + req.Header.Set(k, s.Headers.Get(k)) + } + + if *Debug { + d, _ := httputil.DumpRequestOut(req, true) + log.Printf(">>>>>>>>>> REQUEST:\n", string(d)) + } + + resp, err := s.httpClient.Do(req) + + if err != nil { + return nil, err + } + + if *Debug { + dr, _ := httputil.DumpResponse(resp, true) + log.Printf("<<<<<<<<<< RESULT:\n", string(dr)) + } + + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return resp, errors.New(resp.Status) + } + + return resp, nil +} + +// Perform a simple get to an endpoint +func (s *Session) Request( + method string, + url string, + params *url.Values, + headers *http.Header, + body *[]byte, +) (resp *http.Response, err error) { + // add params to url here + url = s.ApiUrl + url + if params != nil { + url = url + "?" 
+ params.Encode() + } + + // Get the body if one is present + var buf io.Reader + if body != nil { + buf = bytes.NewReader(*body) + } + + req, err := s.NewRequest(method, url, headers, buf) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/json") + + return s.Do(req) +} + +// Perform a simple get to an endpoint and unmarshall returned JSON +func (s *Session) RequestJSON( + method string, + url string, + params *url.Values, + headers *http.Header, + body interface{}, + responseContainer interface{}, +) (resp *http.Response, err error) { + var bodyjson []byte + if body != nil { + bodyjson, err = json.Marshal(body) + if err != nil { + return nil, err + } + } + + // if headers == nil { + // headers = &http.Header{} + // headers.Add("Content-Type", "application/json") + // } + + resp, err = s.Request(method, url, params, headers, &bodyjson) + if err != nil { + return resp, err + } + + // err = util.CheckHTTPResponseStatusCode(resp) + // if err != nil { + // return nil, err + // } + + rbody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, errors.New("error reading response body") + } + if err = json.Unmarshal(rbody, &responseContainer); err != nil { + return resp, err + } + + return resp, nil +} + +func (s *Session) Delete( + url string, + params *url.Values, + headers *http.Header, +) (resp *http.Response, err error) { + return s.Request("DELETE", url, params, headers, nil) +} + +func (s *Session) Get( + url string, + params *url.Values, + headers *http.Header, +) (resp *http.Response, err error) { + return s.Request("GET", url, params, headers, nil) +} + +func (s *Session) GetJSON( + url string, + params *url.Values, + headers *http.Header, + responseContainer interface{}, +) (resp *http.Response, err error) { + return s.RequestJSON("GET", url, params, headers, nil, responseContainer) +} + +func (s *Session) Head( + url string, + params *url.Values, + headers *http.Header, +) (resp *http.Response, err error) { + return 
s.Request("HEAD", url, params, headers, nil) +} + +func (s *Session) Post( + url string, + params *url.Values, + headers *http.Header, + body *[]byte, +) (resp *http.Response, err error) { + if headers == nil { + headers = &http.Header{} + headers.Add("Content-Type", "application/x-www-form-urlencoded") + } + return s.Request("POST", url, params, headers, body) +} + +func (s *Session) PostJSON( + url string, + params *url.Values, + headers *http.Header, + body interface{}, + responseContainer interface{}, +) (resp *http.Response, err error) { + return s.RequestJSON("POST", url, params, headers, body, responseContainer) +} + +func (s *Session) Put( + url string, + params *url.Values, + headers *http.Header, + body *[]byte, +) (resp *http.Response, err error) { + if headers == nil { + headers = &http.Header{} + headers.Add("Content-Type", "application/x-www-form-urlencoded") + } + return s.Request("PUT", url, params, headers, body) +} diff --git a/vendor/github.com/Telmate/proxmox-api-go/proxmox/util.go b/vendor/github.com/Telmate/proxmox-api-go/proxmox/util.go new file mode 100644 index 000000000..5841c421f --- /dev/null +++ b/vendor/github.com/Telmate/proxmox-api-go/proxmox/util.go @@ -0,0 +1,62 @@ +package proxmox + +import ( + "strconv" + "strings" +) + +func inArray(arr []string, str string) bool { + for _, elem := range arr { + if elem == str { + return true + } + } + + return false +} + +func Itob(i int) bool { + if i == 1 { + return true + } + return false +} + +// ParseSubConf - Parse standard sub-conf strings `key=value`. +func ParseSubConf( + element string, + separator string, +) (key string, value interface{}) { + if strings.Contains(element, separator) { + conf := strings.Split(element, separator) + key, value := conf[0], conf[1] + var interValue interface{} + + // Make sure to add value in right type, + // because all subconfig are returned as strings from Proxmox API. 
+ if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { + interValue = int(iValue) + } else if bValue, err := strconv.ParseBool(value); err == nil { + interValue = bValue + } else { + interValue = value + } + return key, interValue + } + return +} + +// ParseConf - Parse standard device conf string `key1=val1,key2=val2`. +func ParseConf( + kvString string, + confSeparator string, + subConfSeparator string, +) QemuDevice { + var confMap = QemuDevice{} + confList := strings.Split(kvString, confSeparator) + for _, item := range confList { + key, value := ParseSubConf(item, subConfSeparator) + confMap[key] = value + } + return confMap +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 44b60e07e..738aef533 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -230,6 +230,12 @@ "revision": "c2e73f942591b0f033a3c6df00f44badb2347c38", "revisionTime": "2018-01-10T05:50:12Z" }, + { + "checksumSHA1": "xp5QPaPy/Viwiv5P8lsJVYlaQ+U=", + "path": "github.com/Telmate/proxmox-api-go/proxmox", + "revision": "7402b5d3034a3e4dea5af6afdfd106a2e353f2da", + "revisionTime": "2019-03-16T14:21:38Z" + }, { "checksumSHA1": "HttiPj314X1a0i2Jen1p6lRH/vE=", "path": "github.com/aliyun/aliyun-oss-go-sdk/oss", From e9d5a1d2723c94628cd8fe4aa523bd9f3bf185db Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Sat, 16 Mar 2019 17:43:25 +0100 Subject: [PATCH 40/47] Update to builder interface change --- builder/proxmox/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/proxmox/builder.go b/builder/proxmox/builder.go index f44bcc7a3..ddeaa3a37 100644 --- a/builder/proxmox/builder.go +++ b/builder/proxmox/builder.go @@ -35,7 +35,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { return nil, nil } -func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { +func (b *Builder) Run(ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { var err error tlsConfig := &tls.Config{ 
InsecureSkipVerify: b.config.SkipCertValidation, From 04b57b3e322ec0cbf1b3d046999a3d1a7bba5f5a Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 4 Apr 2019 16:57:42 -0700 Subject: [PATCH 41/47] fix tests; clean up ip file after build --- builder/proxmox/step_type_boot_command.go | 5 ++++- builder/proxmox/step_type_boot_command_test.go | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/builder/proxmox/step_type_boot_command.go b/builder/proxmox/step_type_boot_command.go index 2e87cb7e5..89d50fb0d 100644 --- a/builder/proxmox/step_type_boot_command.go +++ b/builder/proxmox/step_type_boot_command.go @@ -11,6 +11,7 @@ import ( "github.com/Telmate/proxmox-api-go/proxmox" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/bootcommand" + commonhelper "github.com/hashicorp/packer/helper/common" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" @@ -96,7 +97,9 @@ func (s *stepTypeBootCommand) Run(ctx context.Context, state multistep.StateBag) return multistep.ActionContinue } -func (*stepTypeBootCommand) Cleanup(multistep.StateBag) {} +func (*stepTypeBootCommand) Cleanup(multistep.StateBag) { + commonhelper.RemoveSharedStateFile("ip", "") +} func hostIP() (string, error) { addrs, err := net.InterfaceAddrs() diff --git a/builder/proxmox/step_type_boot_command_test.go b/builder/proxmox/step_type_boot_command_test.go index b2f285c04..c51b57d73 100644 --- a/builder/proxmox/step_type_boot_command_test.go +++ b/builder/proxmox/step_type_boot_command_test.go @@ -115,6 +115,8 @@ func TestTypeBootCommand(t *testing.T) { c.builderConfig.ctx, } action := step.Run(context.TODO(), state) + step.Cleanup(state) + if action != c.expectedAction { t.Errorf("Expected action to be %v, got %v", c.expectedAction, action) } From a08d9dec9bfdfeea733583521e3f5f9fdbae8475 Mon Sep 17 00:00:00 2001 From: Adrien Delorme Date: Fri, 5 Apr 2019 15:30:41 +0200 Subject: [PATCH 
42/47] add a Fixer that replaces the "clean_(image|ami)_name" template calls with "clean_resource_name" --- fix/fixer.go | 2 + fix/fixer_clean_image_name.go | 62 ++++++++++++++++++++++++++++++ fix/fixer_clean_image_name_test.go | 52 +++++++++++++++++++++++++ 3 files changed, 116 insertions(+) create mode 100644 fix/fixer_clean_image_name.go create mode 100644 fix/fixer_clean_image_name_test.go diff --git a/fix/fixer.go b/fix/fixer.go index bf2ccf25b..8ed4ac746 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -41,6 +41,7 @@ func init() { "hyperv-vmxc-typo": new(FixerHypervVmxcTypo), "hyperv-cpu-and-ram": new(FizerHypervCPUandRAM), "vmware-compaction": new(FixerVMwareCompaction), + "clean-image-name": new(FixerCleanImageName), } FixerOrder = []string{ @@ -63,5 +64,6 @@ func init() { "powershell-escapes", "vmware-compaction", "hyperv-cpu-and-ram", + "clean-image-name", } } diff --git a/fix/fixer_clean_image_name.go b/fix/fixer_clean_image_name.go new file mode 100644 index 000000000..cbb85f894 --- /dev/null +++ b/fix/fixer_clean_image_name.go @@ -0,0 +1,62 @@ +package fix + +import ( + "fmt" + "regexp" + + "github.com/mitchellh/mapstructure" +) + +// FixerCleanImageName is a Fixer that replaces the "clean_(image|ami)_name" template +// calls with "clean_resource_name" +type FixerCleanImageName struct{} + +func (FixerCleanImageName) Fix(input map[string]interface{}) (map[string]interface{}, error) { + // Our template type we'll use for this fixer only + type template struct { + Builders []map[string]interface{} + } + + // Decode the input into our structure, if we can + var tpl template + if err := mapstructure.Decode(input, &tpl); err != nil { + return nil, err + } + + re := regexp.MustCompile(`clean_(image|ami)_name`) + + // Go through each builder and replace CreateTime if we can + for _, builder := range tpl.Builders { + for key, value := range builder { + switch v := value.(type) { + case string: + changed := re.ReplaceAllString(v, "clean_resource_name") + 
builder[key] = changed + case map[string]string: + for k := range v { + v[k] = re.ReplaceAllString(v[k], "clean_resource_name") + } + builder[key] = v + case map[string]interface{}: + for k := range v { + if s, ok := v[k].(string); ok { + v[k] = re.ReplaceAllString(s, "clean_resource_name") + } + } + builder[key] = v + default: + if key == "image_labels" { + + panic(fmt.Sprintf("value: %#v", value)) + } + } + } + } + + input["builders"] = tpl.Builders + return input, nil +} + +func (FixerCleanImageName) Synopsis() string { + return `Replaces /clean_(image|ami)_name/ in builder configs with "clean_resource_name"` +} diff --git a/fix/fixer_clean_image_name_test.go b/fix/fixer_clean_image_name_test.go new file mode 100644 index 000000000..21799ba5f --- /dev/null +++ b/fix/fixer_clean_image_name_test.go @@ -0,0 +1,52 @@ +package fix + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestFixerCleanImageName_Impl(t *testing.T) { + var raw interface{} + raw = new(FixerCleanImageName) + if _, ok := raw.(Fixer); !ok { + t.Fatalf("must be a Fixer") + } +} + +func TestFixerCleanImageName_Fix(t *testing.T) { + var f FixerCleanImageName + + input := map[string]interface{}{ + "builders": []interface{}{ + map[string]interface{}{ + "type": "foo", + "ami_name": "heyo clean_image_name", + "image_labels": map[string]interface{}{ + "name": "test-packer-{{packer_version | clean_image_name}}", + }, + }, + }, + } + + expected := map[string]interface{}{ + "builders": []map[string]interface{}{ + { + "type": "foo", + "ami_name": "heyo clean_resource_name", + "image_labels": map[string]interface{}{ + "name": "test-packer-{{packer_version | clean_resource_name}}", + }, + }, + }, + } + + output, err := f.Fix(input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if diff := cmp.Diff(expected, output); diff != "" { + t.Fatalf("unexpected output: %s", diff) + } +} From 5ab8cc6ded7c38477f7d4fd30cd2dc8b66fcc4fd Mon Sep 17 00:00:00 2001 From: MisterMiles 
<26760546+MisterMiles@users.noreply.github.com> Date: Fri, 5 Apr 2019 16:16:25 +0200 Subject: [PATCH 43/47] Update shell-local.html.md The last sentence is misleading because it is assumed that the shell-inline parameter is only executed on the machine which is build by packer. --- website/source/docs/provisioners/shell-local.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index 3dc81fe66..4ce66d6a8 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -51,7 +51,7 @@ Exactly *one* of the following is required: they are all executed within the same context. This allows you to change directories in one command and use something in the directory in the next and so on. Inline scripts are the easiest way to pull off simple tasks - within the machine. + within the machine in which packer is running. - `script` (string) - The path to a script to execute. This path can be absolute or relative. 
If it is relative, it is relative to the working From 3777f7d34a09bf298900c2b160e2df1435f80806 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 5 Apr 2019 10:39:46 -0700 Subject: [PATCH 44/47] update changelog --- CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6da4b9d1b..5b3bad8b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,12 @@ * builder/alicloud: Improve error message for conflicting images name [GH-7415] * builder/amazon-chroot: Allow users to specify custom block device mapping [GH-7370] +* builder/ansible: Documentation fix explaining how to use ansible 2.7 + winrm + [GH-7461] * builder/azure-arm: specify zone resilient image from config [GH-7211] +* builder/docker: Add support for windows containers [GH-7444] +* builder/openstack: Allow both ports and networks in openstack builder + [GH-7451] * builder/openstack: Expose force_delete for openstack builder [GH-7395] * builder/OpenStack: Support Application Credential Authentication [GH-7300] * builder/virtualbox: Add validation for 'none' communicator. [GH-7419] @@ -16,6 +21,7 @@ * core: Lock Packer VNC ports using a lock file to prevent collisions [GH-7422] * core: Print VerifyChecksum log for the download as ui.Message output [GH-7387] +* core: Select a new VNC port if initial port is busy [GH-7423] * post-processor/googlecompute-export: Set network project id to builder [GH-7359] * post-processor/vagrant-cloud: support for the vagrant builder [GH-7397] @@ -24,6 +30,8 @@ * postprocessor/amazon-import: Support S3 and AMI encryption. 
[GH-7396] * provisioner/shell provisioner/windows-shell: allow to specify valid exit codes [GH-7385] +* sensitive-vars: Filter sensitive variables out of the ui as well as the logs + [GH-7462] ### BUG FIXES: * builder/alibaba: Update to latest Alibaba Cloud official image to fix @@ -39,12 +47,23 @@ [GH-7352] ### BACKWARDS INCOMPATIBILITIES: +* builder/amazon: Change `temporary_security_group_source_cidr` to + `temporary_security_group_source_cidrs` and allow it to accept a list of + strings. [GH-7450] * builder/amazon: If users do not pass any encrypt setting, retain any initial encryption setting of the AMI. [GH-6787] * builder/docker: Update docker's default config to use /bin/sh instead of /bin/bash [GH-7106] +* builder/hyperv: Change option names cpu->cpus and ram_size->memory to bring + naming in line with vmware and virtualbox builders [GH-7447] +* builder/oracle-classic: Remove default ssh_username from oracle classic + builder, but add note to docs with oracle's default user. [GH-7446] * builder/scaleway: Renamed attribute api_access_key to organization_id. [GH-6983] +* Change clean_image name and clean_ami_name to a more general clean_resource + name for Googlecompute, Azure, and AWS builders. [GH-7456] +* core/post-processors: Change interface for post-processors to allow an + overridable default for keeping input artifacts. 
[GH-7463] ## 1.3.5 (February 28, 2019) From 12bd1c5f96085c96c3f81c414ff50b06b53c12a1 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 5 Apr 2019 10:57:47 -0700 Subject: [PATCH 45/47] increase timeout for tests to reduce flakiness in travis --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4c72465cb..1b84d0c9c 100644 --- a/Makefile +++ b/Makefile @@ -102,7 +102,7 @@ generate: deps ## Generate dynamically generated code gofmt -w command/plugin.go test: fmt-check mode-check vet ## Run unit tests - @go test $(TEST) $(TESTARGS) -timeout=2m + @go test $(TEST) $(TESTARGS) -timeout=3m # testacc runs acceptance tests testacc: deps generate ## Run acceptance tests @@ -110,7 +110,7 @@ testacc: deps generate ## Run acceptance tests PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m testrace: fmt-check mode-check vet ## Test with race detection enabled - @go test -race $(TEST) $(TESTARGS) -timeout=2m -p=8 + @go test -race $(TEST) $(TESTARGS) -timeout=3m -p=8 updatedeps: @echo "INFO: Packer deps are managed by govendor. 
See .github/CONTRIBUTING.md" From 65cd5bbd29aa60256fe371ef853c82cf64c40576 Mon Sep 17 00:00:00 2001 From: Calle Pettersson Date: Sat, 6 Apr 2019 08:10:28 +0200 Subject: [PATCH 46/47] Change disk size config parameter name --- builder/proxmox/config.go | 2 +- builder/proxmox/config_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/proxmox/config.go b/builder/proxmox/config.go index e35c408f3..8b60c509f 100644 --- a/builder/proxmox/config.go +++ b/builder/proxmox/config.go @@ -61,7 +61,7 @@ type diskConfig struct { Type string `mapstructure:"type"` StoragePool string `mapstructure:"storage_pool"` StoragePoolType string `mapstructure:"storage_pool_type"` - Size string `mapstructure:"size"` + Size string `mapstructure:"disk_size"` CacheMode string `mapstructure:"cache_mode"` DiskFormat string `mapstructure:"format"` } diff --git a/builder/proxmox/config_test.go b/builder/proxmox/config_test.go index a7496dddd..e04f4a0d5 100644 --- a/builder/proxmox/config_test.go +++ b/builder/proxmox/config_test.go @@ -52,7 +52,7 @@ func TestBasicExampleFromDocsIsValid(t *testing.T) { "disks": [ { "type": "scsi", - "size": "5G", + "disk_size": "5G", "storage_pool": "local-lvm", "storage_pool_type": "lvm" } From efd568c10ca26b40cdfd602c389cc22e708e88c0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 8 Apr 2019 09:30:23 -0700 Subject: [PATCH 47/47] fix docs --- website/source/docs/builders/proxmox.html.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/builders/proxmox.html.md b/website/source/docs/builders/proxmox.html.md index f5c4fcb56..ea8beb58d 100644 --- a/website/source/docs/builders/proxmox.html.md +++ b/website/source/docs/builders/proxmox.html.md @@ -108,7 +108,7 @@ builder. [ { "type": "scsi", - "size": "5G", + "disk_size": "5G", "storage_pool": "local-lvm", "storage_pool_type": "lvm" } @@ -125,7 +125,7 @@ builder. - `type` (string) - The type of disk. 
Can be `scsi`, `sata`, `virtio` or `ide`. Defaults to `scsi`. - - `size` (string) - The size of the disk, including a unit suffix, such + - `disk_size` (string) - The size of the disk, including a unit suffix, such as `10G` to indicate 10 gigabytes. - `cache_mode` (string) - How to cache operations to the disk. Can be @@ -175,7 +175,7 @@ manually downloaded. "disks": [ { "type": "scsi", - "size": "5G", + "disk_size": "5G", "storage_pool": "local-lvm", "storage_pool_type": "lvm" }