packer-cn/provisioner/chef-client/provisioner.go

//go:generate mapstructure-to-hcl2 -type Config
// Package chefclient implements a Packer provisioner that uses Chef to
// provision the remote machine with chef-client (that is, against a Chef
// server).
package chefclient
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/packer-plugin-sdk/common"
"github.com/hashicorp/packer-plugin-sdk/guestexec"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
"github.com/hashicorp/packer-plugin-sdk/pathing"
"github.com/hashicorp/packer-plugin-sdk/template/config"
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
"github.com/hashicorp/packer-plugin-sdk/uuid"
)
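
// guestOSTypeConfig holds the per-OS defaults for the execute, install, and
// knife commands and for the staging directory.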
type guestOSTypeConfig struct {
executeCommand string
installCommand string
knifeCommand string
stagingDir string
}
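
// guestOSTypeConfigs maps each supported guest OS type to its default
// commands and staging directory.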
var guestOSTypeConfigs = map[string]guestOSTypeConfig{
guestexec.UnixOSType: {
executeCommand: "{{if .Sudo}}sudo {{end}}chef-client --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
installCommand: "curl -L https://omnitruck.chef.io/install.sh | {{if .Sudo}}sudo {{end}}bash -s --{{if .Version}} -v {{.Version}}{{end}}",
knifeCommand: "{{if .Sudo}}sudo {{end}}knife {{.Args}} {{.Flags}}",
stagingDir: "/tmp/packer-chef-client",
},
guestexec.WindowsOSType: {
executeCommand: "c:/opscode/chef/bin/chef-client.bat --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
installCommand: "powershell.exe -Command \". { iwr -useb https://omnitruck.chef.io/install.ps1 } | iex; Install-Project{{if .Version}} -version {{.Version}}{{end}}\"",
knifeCommand: "c:/opscode/chef/bin/knife.bat {{.Args}} {{.Flags}}",
stagingDir: "C:/Windows/Temp/packer-chef-client",
},
}
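
// Config is the chef-client provisioner configuration, decoded from the
// Packer template.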
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Json map[string]interface{}
ChefEnvironment string `mapstructure:"chef_environment"`
ChefLicense string `mapstructure:"chef_license"`
ClientKey string `mapstructure:"client_key"`
ConfigTemplate string `mapstructure:"config_template"`
ElevatedUser string `mapstructure:"elevated_user"`
ElevatedPassword string `mapstructure:"elevated_password"`
EncryptedDataBagSecretPath string `mapstructure:"encrypted_data_bag_secret_path"`
ExecuteCommand string `mapstructure:"execute_command"`
GuestOSType string `mapstructure:"guest_os_type"`
InstallCommand string `mapstructure:"install_command"`
KnifeCommand string `mapstructure:"knife_command"`
NodeName string `mapstructure:"node_name"`
PolicyGroup string `mapstructure:"policy_group"`
PolicyName string `mapstructure:"policy_name"`
PreventSudo bool `mapstructure:"prevent_sudo"`
RunList []string `mapstructure:"run_list"`
ServerUrl string `mapstructure:"server_url"`
SkipCleanClient bool `mapstructure:"skip_clean_client"`
SkipCleanNode bool `mapstructure:"skip_clean_node"`
SkipCleanStagingDirectory bool `mapstructure:"skip_clean_staging_directory"`
SkipInstall bool `mapstructure:"skip_install"`
SslVerifyMode string `mapstructure:"ssl_verify_mode"`
TrustedCertsDir string `mapstructure:"trusted_certs_dir"`
StagingDir string `mapstructure:"staging_directory"`
ValidationClientName string `mapstructure:"validation_client_name"`
ValidationKeyPath string `mapstructure:"validation_key_path"`
Version string `mapstructure:"version"`
ctx interpolate.Context
}
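
// Provisioner installs and runs chef-client on the remote machine against a
// Chef server.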
type Provisioner struct {
config Config
communicator packersdk.Communicator
guestOSTypeConfig guestOSTypeConfig
guestCommands *guestexec.GuestCommands
generatedData map[string]interface{}
}
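
// ConfigTemplate is the template data used to render client.rb and knife.rb.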
type ConfigTemplate struct {
ChefEnvironment string
ChefLicense string
ClientKey string
EncryptedDataBagSecretPath string
NodeName string
PolicyGroup string
PolicyName string
ServerUrl string
SslVerifyMode string
TrustedCertsDir string
ValidationClientName string
ValidationKeyPath string
}
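
// ExecuteTemplate is the template data for execute_command.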
type ExecuteTemplate struct {
ConfigPath string
JsonPath string
Sudo bool
}
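
// InstallChefTemplate is the template data for install_command.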
type InstallChefTemplate struct {
Sudo bool
Version string
}
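
// KnifeTemplate is the template data for knife_command.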
type KnifeTemplate struct {
Sudo bool
Flags string
Args string
}
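
// ConfigSpec returns the HCL2 object spec for this provisioner's configuration.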
func (p *Provisioner) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
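
// Prepare decodes and validates the provisioner configuration and fills in
// OS-specific defaults for the commands and staging directory.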
func (p *Provisioner) Prepare(raws ...interface{}) error {
err := config.Decode(&p.config, &config.DecodeOpts{
PluginType: "chef-client",
Interpolate: true,
InterpolateContext: &p.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"execute_command",
"install_command",
"knife_command",
},
},
}, raws...)
if err != nil {
return err
}
if p.config.GuestOSType == "" {
p.config.GuestOSType = guestexec.DefaultOSType
}
p.config.GuestOSType = strings.ToLower(p.config.GuestOSType)
var ok bool
p.guestOSTypeConfig, ok = guestOSTypeConfigs[p.config.GuestOSType]
if !ok {
return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
}
p.guestCommands, err = guestexec.NewGuestCommands(p.config.GuestOSType, !p.config.PreventSudo)
if err != nil {
return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
}
if p.config.ExecuteCommand == "" {
p.config.ExecuteCommand = p.guestOSTypeConfig.executeCommand
}
if p.config.InstallCommand == "" {
p.config.InstallCommand = p.guestOSTypeConfig.installCommand
}
if p.config.RunList == nil {
p.config.RunList = make([]string, 0)
}
if p.config.StagingDir == "" {
p.config.StagingDir = p.guestOSTypeConfig.stagingDir
}
if p.config.KnifeCommand == "" {
p.config.KnifeCommand = p.guestOSTypeConfig.knifeCommand
}
var errs *packersdk.MultiError
if p.config.ConfigTemplate != "" {
fi, err := os.Stat(p.config.ConfigTemplate)
if err != nil {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Bad config template path: %s", err))
} else if fi.IsDir() {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Config template path must be a file: %s", err))
}
}
if p.config.ServerUrl == "" {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("server_url must be set"))
}
if !p.config.SkipInstall && p.config.InstallCommand == p.guestOSTypeConfig.installCommand {
if p.config.ChefLicense == "" {
p.config.ChefLicense = "accept-silent"
}
}
if p.config.EncryptedDataBagSecretPath != "" {
pFileInfo, err := os.Stat(p.config.EncryptedDataBagSecretPath)
if err != nil || pFileInfo.IsDir() {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Bad encrypted data bag secret '%s': %s", p.config.EncryptedDataBagSecretPath, err))
}
}
if (p.config.PolicyName != "") != (p.config.PolicyGroup != "") {
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("If either policy_name or policy_group are set, they must both be set."))
}
jsonValid := true
for k, v := range p.config.Json {
p.config.Json[k], err = p.deepJsonFix(k, v)
if err != nil {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Error processing JSON: %s", err))
jsonValid = false
}
}
if jsonValid {
// Process the user variables within the JSON and set the JSON.
// Do this early so that we can validate and show errors.
p.config.Json, err = p.processJsonUserVars()
if err != nil {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Error processing user variables in JSON: %s", err))
}
}
if errs != nil && len(errs.Errors) > 0 {
return errs
}
return nil
}
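
// Provision installs Chef (unless skipped), uploads the client configuration
// and JSON attributes, runs chef-client, and then cleans up the node, the
// client, and the staging directory as configured.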
func (p *Provisioner) Provision(ctx context.Context, ui packersdk.Ui, comm packersdk.Communicator, generatedData map[string]interface{}) error {
p.generatedData = generatedData
p.communicator = comm
nodeName := p.config.NodeName
if nodeName == "" {
nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
}
remoteValidationKeyPath := ""
serverUrl := p.config.ServerUrl
if !p.config.SkipInstall {
if err := p.installChef(ui, comm, p.config.Version); err != nil {
return fmt.Errorf("Error installing Chef: %s", err)
}
}
if err := p.createDir(ui, comm, p.config.StagingDir); err != nil {
return fmt.Errorf("Error creating staging directory: %s", err)
}
if p.config.ClientKey == "" {
p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir)
}
encryptedDataBagSecretPath := ""
if p.config.EncryptedDataBagSecretPath != "" {
encryptedDataBagSecretPath = fmt.Sprintf("%s/encrypted_data_bag_secret", p.config.StagingDir)
if err := p.uploadFile(ui,
comm,
encryptedDataBagSecretPath,
p.config.EncryptedDataBagSecretPath); err != nil {
return fmt.Errorf("Error uploading encrypted data bag secret: %s", err)
}
}
if p.config.ValidationKeyPath != "" {
path, err := pathing.ExpandUser(p.config.ValidationKeyPath)
if err != nil {
return fmt.Errorf("Error while expanding a tilde in the validation key: %s", err)
}
remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
if err := p.uploadFile(ui, comm, remoteValidationKeyPath, path); err != nil {
return fmt.Errorf("Error copying validation key: %s", err)
}
}
configPath, err := p.createConfig(
ui,
comm,
nodeName,
serverUrl,
p.config.ClientKey,
p.config.ChefLicense,
encryptedDataBagSecretPath,
remoteValidationKeyPath,
p.config.ValidationClientName,
p.config.ChefEnvironment,
p.config.PolicyGroup,
p.config.PolicyName,
p.config.SslVerifyMode,
p.config.TrustedCertsDir)
if err != nil {
return fmt.Errorf("Error creating Chef config file: %s", err)
}
jsonPath, err := p.createJson(ui, comm)
if err != nil {
return fmt.Errorf("Error creating JSON attributes: %s", err)
}
err = p.executeChef(ui, comm, configPath, jsonPath)
if !(p.config.SkipCleanNode && p.config.SkipCleanClient) {
knifeConfigPath, knifeErr := p.createKnifeConfig(
ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode, p.config.TrustedCertsDir)
if knifeErr != nil {
return fmt.Errorf("Error creating knife config on node: %s", knifeErr)
}
if !p.config.SkipCleanNode {
if err := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err != nil {
return fmt.Errorf("Error cleaning up chef node: %s", err)
}
}
if !p.config.SkipCleanClient {
if err := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err != nil {
return fmt.Errorf("Error cleaning up chef client: %s", err)
}
}
}
if err != nil {
return fmt.Errorf("Error executing Chef: %s", err)
}
if !p.config.SkipCleanStagingDirectory {
if err := p.removeDir(ui, comm, p.config.StagingDir); err != nil {
return fmt.Errorf("Error removing %s: %s", p.config.StagingDir, err)
}
}
return nil
}
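
// uploadFile uploads a local file to the given remote path.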
func (p *Provisioner) uploadFile(ui packersdk.Ui, comm packersdk.Communicator, remotePath string, localPath string) error {
ui.Message(fmt.Sprintf("Uploading %s...", localPath))
f, err := os.Open(localPath)
if err != nil {
return err
}
defer f.Close()
return comm.Upload(remotePath, f, nil)
}
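
// createConfig renders client.rb from config_template (or the default
// template) and uploads it to the staging directory, returning its remote path.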
func (p *Provisioner) createConfig(
ui packersdk.Ui,
comm packersdk.Communicator,
nodeName string,
serverUrl string,
clientKey string,
chefLicense string,
encryptedDataBagSecretPath,
remoteKeyPath string,
validationClientName string,
chefEnvironment string,
policyGroup string,
policyName string,
sslVerifyMode string,
trustedCertsDir string) (string, error) {
ui.Message("Creating configuration file 'client.rb'")
// Read the template
tpl := DefaultConfigTemplate
if p.config.ConfigTemplate != "" {
f, err := os.Open(p.config.ConfigTemplate)
if err != nil {
return "", err
}
defer f.Close()
tplBytes, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
tpl = string(tplBytes)
}
ictx := p.config.ctx
ictx.Data = &ConfigTemplate{
NodeName: nodeName,
ServerUrl: serverUrl,
ClientKey: clientKey,
ChefLicense: chefLicense,
ValidationKeyPath: remoteKeyPath,
ValidationClientName: validationClientName,
ChefEnvironment: chefEnvironment,
PolicyGroup: policyGroup,
PolicyName: policyName,
SslVerifyMode: sslVerifyMode,
TrustedCertsDir: trustedCertsDir,
EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
}
configString, err := interpolate.Render(tpl, &ictx)
if err != nil {
return "", err
}
remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
return "", err
}
return remotePath, nil
}
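
// createKnifeConfig renders knife.rb from the default knife template and
// uploads it to the staging directory, returning its remote path.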
func (p *Provisioner) createKnifeConfig(ui packersdk.Ui, comm packersdk.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string, trustedCertsDir string) (string, error) {
ui.Message("Creating configuration file 'knife.rb'")
// Read the template
tpl := DefaultKnifeTemplate
ictx := p.config.ctx
ictx.Data = &ConfigTemplate{
NodeName: nodeName,
ServerUrl: serverUrl,
ClientKey: clientKey,
SslVerifyMode: sslVerifyMode,
TrustedCertsDir: trustedCertsDir,
}
configString, err := interpolate.Render(tpl, &ictx)
if err != nil {
return "", err
}
remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb"))
if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
return "", err
}
return remotePath, nil
}
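
// createJson uploads the first-boot.json attributes file, including the run
// list if one is configured, and returns its remote path.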
func (p *Provisioner) createJson(ui packersdk.Ui, comm packersdk.Communicator) (string, error) {
ui.Message("Creating JSON attribute file")
jsonData := make(map[string]interface{})
// Copy the configured JSON
for k, v := range p.config.Json {
jsonData[k] = v
}
// Set the run list if it was specified
if len(p.config.RunList) > 0 {
jsonData["run_list"] = p.config.RunList
}
jsonBytes, err := json.MarshalIndent(jsonData, "", " ")
if err != nil {
return "", err
}
// Upload the bytes
remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "first-boot.json"))
if err := comm.Upload(remotePath, bytes.NewReader(jsonBytes), nil); err != nil {
return "", err
}
return remotePath, nil
}
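
// createDir creates the given remote directory and opens its permissions so
// the provisioning user can write to it.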
func (p *Provisioner) createDir(ui packersdk.Ui, comm packersdk.Communicator, dir string) error {
ctx := context.TODO()
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
cmd := &packersdk.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
// Chmod the directory to 0777 just so that we can access it as our user
cmd = &packersdk.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
return nil
}
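
// cleanNode deletes the node object from the Chef server via knife.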
func (p *Provisioner) cleanNode(ui packersdk.Ui, comm packersdk.Communicator, node string, knifeConfigPath string) error {
ui.Say("Cleaning up chef node...")
args := []string{"node", "delete", node}
if err := p.knifeExec(ui, comm, node, knifeConfigPath, args); err != nil {
return fmt.Errorf("Failed to cleanup node: %s", err)
}
return nil
}
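
// cleanClient deletes the client object from the Chef server via knife.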
func (p *Provisioner) cleanClient(ui packersdk.Ui, comm packersdk.Communicator, node string, knifeConfigPath string) error {
ui.Say("Cleaning up chef client...")
args := []string{"client", "delete", node}
if err := p.knifeExec(ui, comm, node, knifeConfigPath, args); err != nil {
return fmt.Errorf("Failed to cleanup client: %s", err)
}
return nil
}
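
// knifeExec runs a knife command on the remote machine using the uploaded
// knife.rb and returns an error on a non-zero exit status.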
func (p *Provisioner) knifeExec(ui packersdk.Ui, comm packersdk.Communicator, node string, knifeConfigPath string, args []string) error {
flags := []string{
"-y",
"-c", knifeConfigPath,
}
ctx := context.TODO()
p.config.ctx.Data = &KnifeTemplate{
Sudo: !p.config.PreventSudo,
Flags: strings.Join(flags, " "),
Args: strings.Join(args, " "),
}
command, err := interpolate.Render(p.config.KnifeCommand, &p.config.ctx)
if err != nil {
return err
}
cmd := &packersdk.RemoteCmd{Command: command}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Non-zero exit status. See output above for more info.\n\n"+
"Command: %s",
command)
}
return nil
}
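
// removeDir removes the given remote directory.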
func (p *Provisioner) removeDir(ui packersdk.Ui, comm packersdk.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Removing directory: %s", dir))
ctx := context.TODO()
cmd := &packersdk.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
return nil
}
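
// executeChef renders execute_command and runs chef-client on the remote
// machine, optionally through an elevated runner.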
func (p *Provisioner) executeChef(ui packersdk.Ui, comm packersdk.Communicator, config string, json string) error {
p.config.ctx.Data = &ExecuteTemplate{
ConfigPath: config,
JsonPath: json,
Sudo: !p.config.PreventSudo,
}
ctx := context.TODO()
command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
if err != nil {
return err
}
if p.config.ElevatedUser != "" {
command, err = guestexec.GenerateElevatedRunner(command, p)
if err != nil {
return err
}
}
ui.Message(fmt.Sprintf("Executing Chef: %s", command))
cmd := &packersdk.RemoteCmd{
Command: command,
}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus())
}
return nil
}
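
// installChef renders install_command and runs it to install Chef on the
// remote machine.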
func (p *Provisioner) installChef(ui packersdk.Ui, comm packersdk.Communicator, version string) error {
ui.Message("Installing Chef...")
ctx := context.TODO()
p.config.ctx.Data = &InstallChefTemplate{
Sudo: !p.config.PreventSudo,
Version: version,
}
command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx)
if err != nil {
return err
}
ui.Message(command)
cmd := &packersdk.RemoteCmd{Command: command}
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Install script exited with non-zero exit status %d", cmd.ExitStatus())
}
return nil
}
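
// deepJsonFix recursively normalizes decoded JSON values: byte slices become
// strings and interface-keyed maps become string-keyed maps, erroring on
// non-string keys.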
func (p *Provisioner) deepJsonFix(key string, current interface{}) (interface{}, error) {
if current == nil {
return nil, nil
}
switch c := current.(type) {
case []interface{}:
val := make([]interface{}, len(c))
for i, v := range c {
var err error
val[i], err = p.deepJsonFix(fmt.Sprintf("%s[%d]", key, i), v)
if err != nil {
return nil, err
}
}
return val, nil
case []uint8:
return string(c), nil
case map[interface{}]interface{}:
val := make(map[string]interface{})
for k, v := range c {
ks, ok := k.(string)
if !ok {
return nil, fmt.Errorf("%s: key is not string", key)
}
var err error
val[ks], err = p.deepJsonFix(
fmt.Sprintf("%s.%s", key, ks), v)
if err != nil {
return nil, err
}
}
return val, nil
default:
return current, nil
}
}
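
// processJsonUserVars interpolates user variables into the configured JSON
// attributes and returns the re-decoded result.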
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
jsonBytes, err := json.Marshal(p.config.Json)
if err != nil {
// This really shouldn't happen since we literally just unmarshalled this data.
panic(err)
}
// Copy the user variables so that we can restore them later, and
// make sure we make the quotes JSON-friendly in the user variables.
originalUserVars := make(map[string]string)
for k, v := range p.config.ctx.UserVariables {
originalUserVars[k] = v
}
// Make sure we reset them no matter what
defer func() {
p.config.ctx.UserVariables = originalUserVars
}()
// Make the current user variables JSON string safe.
for k, v := range p.config.ctx.UserVariables {
v = strings.Replace(v, `\`, `\\`, -1)
v = strings.Replace(v, `"`, `\"`, -1)
p.config.ctx.UserVariables[k] = v
}
// Process the bytes with the template processor
p.config.ctx.Data = nil
jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx)
if err != nil {
return nil, err
}
var result map[string]interface{}
if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
return nil, err
}
return result, nil
}
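
// Communicator, ElevatedUser, and ElevatedPassword expose the data needed by
// guestexec.GenerateElevatedRunner for elevated execution.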
func (p *Provisioner) Communicator() packersdk.Communicator {
return p.communicator
}
func (p *Provisioner) ElevatedUser() string {
return p.config.ElevatedUser
}
func (p *Provisioner) ElevatedPassword() string {
// Replace ElevatedPassword for winrm users who used this feature
p.config.ctx.Data = p.generatedData
elevatedPassword, _ := interpolate.Render(p.config.ElevatedPassword, &p.config.ctx)
return elevatedPassword
}
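
// DefaultConfigTemplate is the client.rb template used when config_template
// is not set.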
var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
chef_license "{{.ChefLicense}}"
{{if ne .EncryptedDataBagSecretPath ""}}
encrypted_data_bag_secret "{{.EncryptedDataBagSecretPath}}"
{{end}}
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
validation_client_name "chef-validator"
{{end}}
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
{{if ne .PolicyGroup ""}}
policy_group "{{.PolicyGroup}}"
{{end}}
{{if ne .PolicyName ""}}
policy_name "{{.PolicyName}}"
{{end}}
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`
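
// DefaultKnifeTemplate is the knife.rb template used for node and client
// cleanup.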
var DefaultKnifeTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
node_name "{{.NodeName}}"
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`