Merge branch 'master' of https://github.com/hashicorp/packer into patch-1

This commit is contained in:
Tobias 2017-08-08 13:27:45 +00:00
commit 8a9c2b6959
49 changed files with 517 additions and 3018 deletions

View File

@ -1,7 +1,21 @@
## UNRELEASED
### IMPROVEMENTS:
* builder/alicloud: Increase polling timeout. [GH-5148]
* builder/parallels: Remove soon to be removed --vmtype flag in createvm.
[GH-5172]
* contrib: add json files to zsh completion. [GH-5195]
### BUG FIXES:
* builder/amazon: fix builds when using the null communicator. [GH-5217]
* core: Strip query parameters from ISO URLs when checking against a checksum
file. [GH-5181]
## 1.0.3 (July 17, 2017) ## 1.0.3 (July 17, 2017)
### IMRPOVEMENTS: ### IMPROVEMENTS:
* builder/Azure: Update to latest Azure SDK, enabling support for managed * builder/azure: Update to latest Azure SDK, enabling support for managed
disks. [GH-4511] disks. [GH-4511]
* builder/cloudstack: Add default cidr_list [ 0.0.0.0/0 ]. [GH-5125] * builder/cloudstack: Add default cidr_list [ 0.0.0.0/0 ]. [GH-5125]
* builder/cloudstack: Add support for ssh_agent_auth. [GH-5130] * builder/cloudstack: Add support for ssh_agent_auth. [GH-5130]

View File

@ -5,6 +5,7 @@ package ecs
import ( import (
"log" "log"
"fmt"
"github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/helper/config"
@ -98,7 +99,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
PrivateKeyFile: b.config.Comm.SSHPrivateKey, PrivateKeyFile: b.config.Comm.SSHPrivateKey,
TemporaryKeyPairName: b.config.TemporaryKeyPairName, TemporaryKeyPairName: b.config.TemporaryKeyPairName,
SSHAgentAuth: b.config.Comm.SSHAgentAuth, SSHAgentAuth: b.config.Comm.SSHAgentAuth,
//DebugKeyPath: b.config.Com DebugKeyPath: fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
RegionId: b.config.AlicloudRegion, RegionId: b.config.AlicloudRegion,
}, },
} }

View File

@ -7,6 +7,7 @@ import (
"github.com/denverdino/aliyungo/ecs" "github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"time"
) )
type stepAttachKeyPar struct { type stepAttachKeyPar struct {
@ -21,7 +22,7 @@ func (s *stepAttachKeyPar) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client) client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config) config := state.Get("config").(Config)
instance := state.Get("instance").(*ecs.InstanceAttributesType) instance := state.Get("instance").(*ecs.InstanceAttributesType)
retry_times := 3 timeoutPoint := time.Now().Add(120 * time.Second)
for { for {
err := client.AttachKeyPair(&ecs.AttachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion), err := client.AttachKeyPair(&ecs.AttachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"}) KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
@ -29,8 +30,8 @@ func (s *stepAttachKeyPar) Run(state multistep.StateBag) multistep.StepAction {
e, _ := err.(*common.Error) e, _ := err.(*common.Error)
if (!(e.Code == "MissingParameter" || e.Code == "DependencyViolation.WindowsInstance" || if (!(e.Code == "MissingParameter" || e.Code == "DependencyViolation.WindowsInstance" ||
e.Code == "InvalidKeyPairName.NotFound" || e.Code == "InvalidRegionId.NotFound")) && e.Code == "InvalidKeyPairName.NotFound" || e.Code == "InvalidRegionId.NotFound")) &&
retry_times > 0 { time.Now().Before(timeoutPoint) {
retry_times = retry_times - 1 time.Sleep(5 * time.Second)
continue continue
} }
err := fmt.Errorf("Error attaching keypair %s to instance %s : %s", err := fmt.Errorf("Error attaching keypair %s to instance %s : %s",

View File

@ -121,12 +121,12 @@ func (s *stepConfigAlicloudSecurityGroup) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
message(state, "security group") message(state, "security group")
start := time.Now().Add(10 * time.Second) timeoutPoint := time.Now().Add(120 * time.Second)
for { for {
if err := client.DeleteSecurityGroup(common.Region(s.RegionId), s.SecurityGroupId); err != nil { if err := client.DeleteSecurityGroup(common.Region(s.RegionId), s.SecurityGroupId); err != nil {
e, _ := err.(*common.Error) e, _ := err.(*common.Error)
if e.Code == "DependencyViolation" && time.Now().Before(start) { if e.Code == "DependencyViolation" && time.Now().Before(timeoutPoint) {
time.Sleep(1 * time.Second) time.Sleep(5 * time.Second)
continue continue
} }
ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err)) ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))

View File

@ -78,13 +78,13 @@ func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
message(state, "VPC") message(state, "VPC")
start := time.Now().Add(10 * time.Second) timeoutPoint := time.Now().Add(60 * time.Second)
for { for {
if err := client.DeleteVpc(s.VpcId); err != nil { if err := client.DeleteVpc(s.VpcId); err != nil {
e, _ := err.(*common.Error) e, _ := err.(*common.Error)
if (e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" || if (e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" ||
e.Code == "DependencyViolation.VSwitch" || e.Code == "DependencyViolation.VSwitch" ||
e.Code == "DependencyViolation.SecurityGroup") && time.Now().Before(start) { e.Code == "DependencyViolation.SecurityGroup") && time.Now().Before(timeoutPoint) {
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
continue continue
} }

View File

@ -113,7 +113,7 @@ func (s *stepConfigAlicloudVSwitch) Run(state multistep.StateBag) multistep.Step
} }
if err := client.WaitForVSwitchAvailable(vpcId, s.VSwitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil { if err := client.WaitForVSwitchAvailable(vpcId, s.VSwitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
state.Put("error", err) state.Put("error", err)
ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become avaiable: %v", err)) ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become available: %v", err))
return multistep.ActionHalt return multistep.ActionHalt
} }
state.Put("vswitchid", vswitchId) state.Put("vswitchid", vswitchId)
@ -130,13 +130,13 @@ func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
client := state.Get("client").(*ecs.Client) client := state.Get("client").(*ecs.Client)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
message(state, "vSwitch") message(state, "vSwitch")
start := time.Now().Add(10 * time.Second) timeoutPoint := time.Now().Add(10 * time.Second)
for { for {
if err := client.DeleteVSwitch(s.VSwitchId); err != nil { if err := client.DeleteVSwitch(s.VSwitchId); err != nil {
e, _ := err.(*common.Error) e, _ := err.(*common.Error)
if (e.Code == "IncorrectVSwitchStatus" || e.Code == "DependencyViolation" || if (e.Code == "IncorrectVSwitchStatus" || e.Code == "DependencyViolation" ||
e.Code == "DependencyViolation.HaVip" || e.Code == "DependencyViolation.HaVip" ||
e.Code == "IncorretRouteEntryStatus") && time.Now().Before(start) { e.Code == "IncorretRouteEntryStatus") && time.Now().Before(timeoutPoint) {
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
continue continue
} }

View File

@ -45,8 +45,10 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
port := s.CommConfig.Port() port := s.CommConfig.Port()
if port == 0 { if port == 0 {
if s.CommConfig.Type != "none" {
panic("port must be set to a non-zero value.") panic("port must be set to a non-zero value.")
} }
}
// Create the group // Create the group
groupName := fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID()) groupName := fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())

View File

@ -63,7 +63,7 @@ func TestBMPString(t *testing.T) {
// some character outside the BMP should error // some character outside the BMP should error
tst = "\U0001f000 East wind (Mahjong)" tst = "\U0001f000 East wind (Mahjong)"
str, err = bmpString(tst) _, err = bmpString(tst)
if err == nil { if err == nil {
t.Errorf("expected '%s' to throw error because the first character is not in the BMP", tst) t.Errorf("expected '%s' to throw error because the first character is not in the BMP", tst)
} }

View File

@ -21,7 +21,6 @@ func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher
k := deriveKeyByAlg[algorithmName](params.Salt, password, params.Iterations) k := deriveKeyByAlg[algorithmName](params.Salt, password, params.Iterations)
iv := deriveIVByAlg[algorithmName](params.Salt, password, params.Iterations) iv := deriveIVByAlg[algorithmName](params.Salt, password, params.Iterations)
password = nil
code, err := blockcodeByAlg[algorithmName](k) code, err := blockcodeByAlg[algorithmName](k)
if err != nil { if err != nil {
@ -34,7 +33,6 @@ func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher
func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
cbc, err := pbDecrypterFor(info.GetAlgorithm(), password) cbc, err := pbDecrypterFor(info.GetAlgorithm(), password)
password = nil
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -113,7 +113,6 @@ func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID
for len(P) < times*v { for len(P) < times*v {
P = append(P, password...) P = append(P, password...)
} }
password = nil
P = P[:times*v] P = P[:times*v]
} }

View File

@ -85,12 +85,12 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
} }
imageTransfer, _, err := client.ImageActions.Transfer(context.TODO(), images[0].ID, transferRequest) imageTransfer, _, err := client.ImageActions.Transfer(context.TODO(), images[0].ID, transferRequest)
if err != nil { if err != nil {
err := fmt.Errorf("Error transfering snapshot: %s", err) err := fmt.Errorf("Error transferring snapshot: %s", err)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
} }
ui.Say(fmt.Sprintf("Transfering Snapshot ID: %d", imageTransfer.ID)) ui.Say(fmt.Sprintf("transferring Snapshot ID: %d", imageTransfer.ID))
if err := waitForImageState(godo.ActionCompleted, imageTransfer.ID, action.ID, if err := waitForImageState(godo.ActionCompleted, imageTransfer.ID, action.ID,
client, 20*time.Minute); err != nil { client, 20*time.Minute); err != nil {
// If we get an error the first time, actually report it // If we get an error the first time, actually report it

View File

@ -21,7 +21,7 @@ GetMetadata () {
echo "$(curl -f -H "Metadata-Flavor: Google" ${BASEMETADATAURL}/${1} 2> /dev/null)" echo "$(curl -f -H "Metadata-Flavor: Google" ${BASEMETADATAURL}/${1} 2> /dev/null)"
} }
ZONE=$(GetMetadata zone | grep -oP "[^/]*$") ZONE=$(basename $(GetMetadata zone))
SetMetadata () { SetMetadata () {
gcloud compute instances add-metadata ${HOSTNAME} --metadata ${1}=${2} --zone ${ZONE} gcloud compute instances add-metadata ${HOSTNAME} --metadata ${1}=${2} --zone ${ZONE}

View File

@ -27,7 +27,6 @@ func (s *stepCreateVM) Run(state multistep.StateBag) multistep.StepAction {
"create", name, "create", name,
"--distribution", config.GuestOSType, "--distribution", config.GuestOSType,
"--dst", config.OutputDir, "--dst", config.OutputDir,
"--vmtype", "vm",
"--no-hdd", "--no-hdd",
} }

View File

@ -136,7 +136,13 @@ func (c *ISOConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs [
} }
func (c *ISOConfig) parseCheckSumFile(rd *bufio.Reader) error { func (c *ISOConfig) parseCheckSumFile(rd *bufio.Reader) error {
errNotFound := fmt.Errorf("No checksum for %q found at: %s", filepath.Base(c.ISOUrls[0]), c.ISOChecksumURL) u, err := url.Parse(c.ISOUrls[0])
if err != nil {
return err
}
filename := filepath.Base(u.Path)
errNotFound := fmt.Errorf("No checksum for %q found at: %s", filename, c.ISOChecksumURL)
for { for {
line, err := rd.ReadString('\n') line, err := rd.ReadString('\n')
if err != nil && line == "" { if err != nil && line == "" {
@ -148,7 +154,7 @@ func (c *ISOConfig) parseCheckSumFile(rd *bufio.Reader) error {
} }
if strings.ToLower(parts[0]) == c.ISOChecksumType { if strings.ToLower(parts[0]) == c.ISOChecksumType {
// BSD-style checksum // BSD-style checksum
if parts[1] == fmt.Sprintf("(%s)", filepath.Base(c.ISOUrls[0])) { if parts[1] == fmt.Sprintf("(%s)", filename) {
c.ISOChecksum = parts[3] c.ISOChecksum = parts[3]
return nil return nil
} }
@ -158,7 +164,7 @@ func (c *ISOConfig) parseCheckSumFile(rd *bufio.Reader) error {
// Binary mode // Binary mode
parts[1] = parts[1][1:] parts[1] = parts[1][1:]
} }
if parts[1] == filepath.Base(c.ISOUrls[0]) { if parts[1] == filename {
c.ISOChecksum = parts[0] c.ISOChecksum = parts[0]
return nil return nil
} }

View File

@ -152,6 +152,25 @@ func TestISOConfigPrepare_ISOChecksumURL(t *testing.T) {
t.Fatalf("should've found \"bar0\" got: %s", i.ISOChecksum) t.Fatalf("should've found \"bar0\" got: %s", i.ISOChecksum)
} }
// Test good - ISOChecksumURL GNU style with query parameters
i = testISOConfig()
i.ISOChecksum = ""
i.RawSingleISOUrl = "http://www.packer.io/the-OS.iso?stuff=boo"
cs_file, _ = ioutil.TempFile("", "packer-test-")
ioutil.WriteFile(cs_file.Name(), []byte(cs_gnu_style), 0666)
i.ISOChecksumURL = fmt.Sprintf("%s%s", filePrefix, cs_file.Name())
warns, err = i.Prepare(nil)
if len(warns) > 0 {
t.Fatalf("bad: %#v", warns)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if i.ISOChecksum != "bar0" {
t.Fatalf("should've found \"bar0\" got: %s", i.ISOChecksum)
}
} }
func TestISOConfigPrepare_ISOChecksumType(t *testing.T) { func TestISOConfigPrepare_ISOChecksumType(t *testing.T) {

View File

@ -1,66 +1,68 @@
#compdef packer #compdef packer
local -a _packer_cmds _packer () {
_packer_cmds=( local -a sub_commands && sub_commands=(
'build:Build image(s) from template' 'build:Build image(s) from template'
'fix:Fixes templates from old versions of packer' 'fix:Fixes templates from old versions of packer'
'inspect:See components of a template' 'inspect:See components of a template'
'push:Push template files to a Packer build service' 'push:Push template files to a Packer build service'
'validate:Check that a template is valid' 'validate:Check that a template is valid'
'version:Prints the Packer version' 'version:Prints the Packer version'
) )
__build() { local -a build_arguments && build_arguments=(
_arguments \ '-debug[Debug mode enabled for builds]'
'-debug[Debug mode enabled for builds]' \ '-force[Force a build to continue if artifacts exist, deletes existing artifacts]'
'-force[Force a build to continue if artifacts exist, deletes existing artifacts]' \
'-machine-readable[Machine-readable output]' \
'-except=[(foo,bar,baz) Build all builds other than these]' \
'-only=[(foo,bar,baz) Only build the given builds by name]' \
'-parallel=[(false) Disable parallelization (on by default)]' \
'-var[("key=value") Variable for templates, can be used multiple times.]' \
'-var-file=[(path) JSON file containing user variables.]'
}
__inspect() {
_arguments \
'-machine-readable[Machine-readable output]' '-machine-readable[Machine-readable output]'
} '-except=[(foo,bar,baz) Build all builds other than these]'
'-only=[(foo,bar,baz) Only build the given builds by name]'
__push() { '-parallel=[(false) Disable parallelization (on by default)]'
_arguments \ '-var[("key=value") Variable for templates, can be used multiple times.]'
'-name=[(<name>) The destination build in Atlas.]' \
'-token=[(<token>) Access token to use to upload.]' \
'-var[("key=value") Variable for templates, can be used multiple times.]' \
'-var-file=[(path) JSON file containing user variables.]' '-var-file=[(path) JSON file containing user variables.]'
} '(-)*:files:_files -g "*.json"'
)
__validate() { local -a inspect_arguments && inspect_arguments=(
_arguments \ '-machine-readable[Machine-readable output]'
'-syntax-only[Only check syntax. Do not verify config of the template.]' \ '(-)*:files:_files -g "*.json"'
'-except=[(foo,bar,baz) Validate all builds other than these]' \ )
'-only=[(foo,bar,baz) Validate only these builds]' \
'-var[("key=value") Variable for templates, can be used multiple times.]' \ local -a push_arguments && push_arguments=(
'-name=[(<name>) The destination build in Atlas.]'
'-token=[(<token>) Access token to use to upload.]'
'-var[("key=value") Variable for templates, can be used multiple times.]'
'-var-file=[(path) JSON file containing user variables.]' '-var-file=[(path) JSON file containing user variables.]'
} '(-)*:files:_files -g "*.json"'
)
local -a validate_arguments && validate_arguments=(
'-syntax-only[Only check syntax. Do not verify config of the template.]'
'-except=[(foo,bar,baz) Validate all builds other than these]'
'-only=[(foo,bar,baz) Validate only these builds]'
'-var[("key=value") Variable for templates, can be used multiple times.]'
'-var-file=[(path) JSON file containing user variables.]'
'(-)*:files:_files -g "*.json"'
)
_arguments '*:: :->command' _arguments -C \
':command:->command' \
'*::options:->options'
if (( CURRENT == 1 )); then case $state in
_describe -t commands "packer command" _packer_cmds command)
return _describe -t commands 'command' sub_commands ;;
fi options)
case $line[1] in
local -a _command_args
case "$words[1]" in
build) build)
__build ;; _arguments -s -S : $build_arguments ;;
inspect) inspect)
__inspect ;; _arguments -s -S : $inspect_arguments ;;
push) push)
__push ;; _arguments -s -S : $push_arguments ;;
validate) validate)
__validate ;; _arguments -s -S : $validate_arguments ;;
esac esac
;;
esac
}
_packer "$@"

View File

@ -8,8 +8,8 @@
"access_key":"{{user `access_key`}}", "access_key":"{{user `access_key`}}",
"secret_key":"{{user `secret_key`}}", "secret_key":"{{user `secret_key`}}",
"region":"cn-beijing", "region":"cn-beijing",
"image_name":"packer_basi", "image_name":"packer_basic",
"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd", "source_image":"centos_7_2_64_40G_base_20170222.vhd",
"ssh_username":"root", "ssh_username":"root",
"instance_type":"ecs.n1.tiny", "instance_type":"ecs.n1.tiny",
"io_optimized":"true" "io_optimized":"true"
@ -18,7 +18,7 @@
"type": "shell", "type": "shell",
"inline": [ "inline": [
"sleep 30", "sleep 30",
"apt-get update -yy" "yum install redis.x86_64 -y"
] ]
}] }]
} }

View File

@ -9,7 +9,7 @@
"secret_key":"{{user `secret_key`}}", "secret_key":"{{user `secret_key`}}",
"region":"cn-beijing", "region":"cn-beijing",
"image_name":"packer_test", "image_name":"packer_test",
"source_image":"win2012_64_datactr_r2_en_40G_alibase_20160622.vhd", "source_image":"win2008r2_64_ent_sp1_zh-cn_40G_alibase_20170622.vhd",
"instance_type":"ecs.n1.tiny", "instance_type":"ecs.n1.tiny",
"io_optimized":"true", "io_optimized":"true",
"image_force_delete":"true", "image_force_delete":"true",

View File

@ -9,7 +9,7 @@
"secret_key":"{{user `secret_key`}}", "secret_key":"{{user `secret_key`}}",
"region":"cn-beijing", "region":"cn-beijing",
"image_name":"packer_with_data_disk", "image_name":"packer_with_data_disk",
"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd", "source_image":"centos_7_2_64_40G_base_20170222.vhd",
"ssh_username":"root", "ssh_username":"root",
"instance_type":"ecs.n1.tiny", "instance_type":"ecs.n1.tiny",
"io_optimized":"true", "io_optimized":"true",
@ -19,7 +19,7 @@
"type": "shell", "type": "shell",
"inline": [ "inline": [
"sleep 30", "sleep 30",
"apt-get update -yy" "yum install redis.x86_64 -y"
] ]
}] }]
} }

View File

@ -9,7 +9,7 @@
"secret_key":"{{user `secret_key`}}", "secret_key":"{{user `secret_key`}}",
"region":"cn-beijing", "region":"cn-beijing",
"image_name":"packer_chef2", "image_name":"packer_chef2",
"source_image":"ubuntu_14_0405_64_40G_base_20170222.vhd", "source_image":"ubuntu_14_0405_64_40G_alibase_20170625.vhd",
"ssh_username":"root", "ssh_username":"root",
"instance_type":"ecs.n1.medium", "instance_type":"ecs.n1.medium",
"io_optimized":"true", "io_optimized":"true",

View File

@ -1,4 +1,5 @@
#!/bin/sh #!/bin/sh
#if the related deb pkg is not found, please replace it with another available repository url
HOSTNAME=`ifconfig eth1|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1` HOSTNAME=`ifconfig eth1|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
if [ not $HOSTNAME ] ; then if [ not $HOSTNAME ] ; then
HOSTNAME=`ifconfig eth0|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1` HOSTNAME=`ifconfig eth0|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`

View File

@ -0,0 +1,31 @@
{
"variables": {
"access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
"secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
},
"builders": [{
"type":"alicloud-ecs",
"access_key":"{{user `access_key`}}",
"secret_key":"{{user `secret_key`}}",
"region":"cn-beijing",
"image_name":"packer_jenkins",
"source_image":"ubuntu_14_0405_64_40G_alibase_20170625.vhd",
"ssh_username":"root",
"instance_type":"ecs.n1.medium",
"io_optimized":"true",
"image_force_delete":"true",
"ssh_password":"Test12345"
}],
"provisioners": [{
"type": "file",
"source": "examples/alicloud/jenkins/jenkins.sh",
"destination": "/root/"
},{
"type": "shell",
"inline": [
"cd /root/",
"chmod 755 jenkins.sh",
"./jenkins.sh"
]
}]
}

View File

@ -0,0 +1,48 @@
#!/bin/sh
# Installs Jenkins (WAR deployed into Apache Tomcat) and configures it to
# start on boot. Intended to be run as a Packer shell provisioner on a
# Debian/Ubuntu or CentOS/Fedora/RHEL guest.
JENKINS_URL='http://mirrors.jenkins.io/war-stable/2.32.2/jenkins.war'
TOMCAT_VERSION='7.0.77'
TOMCAT_NAME="apache-tomcat-$TOMCAT_VERSION"
TOMCAT_PACKAGE="$TOMCAT_NAME.tar.gz"
TOMCAT_URL="http://mirror.bit.edu.cn/apache/tomcat/tomcat-7/v$TOMCAT_VERSION/bin/$TOMCAT_PACKAGE"
TOMCAT_PATH="/opt/$TOMCAT_NAME"
#install jdk for the detected distribution
if grep -Eqi "Ubuntu|Debian|Raspbian" /etc/issue || grep -Eq "Ubuntu|Debian|Raspbian" /etc/*-release; then
    sudo apt-get update -y
    sudo apt-get install -y openjdk-7-jdk
elif grep -Eqi "CentOS|Fedora|Red Hat Enterprise Linux Server" /etc/issue || grep -Eq "CentOS|Fedora|Red Hat Enterprise Linux Server" /etc/*-release; then
    sudo yum update -y
    # NOTE: "openjdk-7-jdk" is the Debian/Ubuntu package name and does not
    # exist in yum repositories; the CentOS/Fedora equivalent is below.
    sudo yum install -y java-1.7.0-openjdk-devel
else
    echo "Unknown OS type."
fi
#install jenkins server
mkdir -p ~/work
cd ~/work
#install tomcat
wget $TOMCAT_URL
tar -zxvf $TOMCAT_PACKAGE
mv $TOMCAT_NAME /opt
#install jenkins (deployed as a Tomcat webapp)
wget $JENKINS_URL
mv jenkins.war $TOMCAT_PATH/webapps/
#set environment variables for subsequent logins
echo "TOMCAT_PATH=\"$TOMCAT_PATH\"">>/etc/profile
echo "JENKINS_HOME=\"$TOMCAT_PATH/webapps/jenkins\"">>/etc/profile
echo PATH="\"\$PATH:\$TOMCAT_PATH:\$JENKINS_HOME\"">>/etc/profile
. /etc/profile
#start tomcat & jenkins
$TOMCAT_PATH/bin/startup.sh
#set start on boot: append the startup command right after the shebang line
sed -i "/#!\/bin\/sh/a$TOMCAT_PATH/bin/startup.sh" /etc/rc.local
#clean up the scratch directory
rm -rf ~/work

View File

@ -0,0 +1,60 @@
{"variables": {
"box_basename": "centos-6.8",
"build_timestamp": "{{isotime \"20060102150405\"}}",
"cpus": "1",
"disk_size": "4096",
"git_revision": "__unknown_git_revision__",
"headless": "",
"http_proxy": "{{env `http_proxy`}}",
"https_proxy": "{{env `https_proxy`}}",
"iso_checksum": "0ca12fe5f28c2ceed4f4084b41ff8a0b",
"iso_checksum_type": "md5",
"iso_name": "CentOS-6.8-x86_64-minimal.iso",
"ks_path": "centos-6.8/ks.cfg",
"memory": "512",
"metadata": "floppy/dummy_metadata.json",
"mirror": "http://mirrors.aliyun.com/centos",
"mirror_directory": "6.8/isos/x86_64",
"name": "centos-6.8",
"no_proxy": "{{env `no_proxy`}}",
"template": "centos-6.8-x86_64",
"version": "2.1.TIMESTAMP"
},
"builders":[
{
"boot_command": [
"<tab> text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/{{user `ks_path`}}<enter><wait>"
],
"boot_wait": "10s",
"disk_size": "{{user `disk_size`}}",
"headless": "{{ user `headless` }}",
"http_directory": "http",
"iso_checksum": "{{user `iso_checksum`}}",
"iso_checksum_type": "{{user `iso_checksum_type`}}",
"iso_url": "{{user `mirror`}}/{{user `mirror_directory`}}/{{user `iso_name`}}",
"output_directory": "packer-{{user `template`}}-qemu",
"shutdown_command": "echo 'vagrant'|sudo -S /sbin/halt -h -p",
"ssh_password": "vagrant",
"ssh_port": 22,
"ssh_username": "root",
"ssh_wait_timeout": "10000s",
"type": "qemu",
"vm_name": "{{ user `template` }}.raw",
"net_device": "virtio-net",
"disk_interface": "virtio",
"format": "raw"
}
],
"post-processors":[
{
"type":"alicloud-import",
"oss_bucket_name": "packer",
"image_name": "packer_import",
"image_os_type": "linux",
"image_platform": "CentOS",
"image_architecture": "x86_64",
"image_system_size": "40",
"region":"cn-beijing"
}
]
}

View File

@ -0,0 +1,69 @@
# Kickstart file for an unattended CentOS 6.8 minimal installation.
# Referenced by the Packer qemu builder template in this example
# (ks_path: centos-6.8/ks.cfg); the resulting image is imported to Alicloud.
install
cdrom
lang en_US.UTF-8
keyboard us
network --bootproto=dhcp
# NOTE: root/vagrant credentials are the standard insecure Vagrant defaults;
# the image is meant for local development, not production.
rootpw vagrant
firewall --disabled
selinux --permissive
timezone UTC
unsupported_hardware
bootloader --location=mbr
# Text-mode install, no X configuration.
text
skipx
# Wipe the disk and let the installer pick a default partition layout.
zerombr
clearpart --all --initlabel
autopart
auth --enableshadow --passalgo=sha512 --kickstart
firstboot --disabled
reboot
user --name=vagrant --plaintext --password vagrant
key --skip
%packages --nobase --ignoremissing --excludedocs
# vagrant needs this to copy initial files via scp
openssh-clients
sudo
# kernel headers/devel + toolchain: needed to build guest additions/modules
kernel-headers
kernel-devel
gcc
make
perl
wget
nfs-utils
-fprintd-pam
-intltool
# unnecessary firmware
-aic94xx-firmware
-atmel-firmware
-b43-openfwwf
-bfa-firmware
-ipw2100-firmware
-ipw2200-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-libertas-usb8388-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
-xorg-x11-drv-ati-firmware
-zd1211-firmware
%post
# Force to set SELinux to a permissive mode
sed -i -e 's/\(^SELINUX=\).*$/\1permissive/' /etc/selinux/config
# Allow the vagrant user passwordless sudo (required by Vagrant tooling).
echo "%vagrant ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/vagrant

View File

@ -2,12 +2,13 @@ package plugin
import ( import (
"fmt" "fmt"
"github.com/hashicorp/packer/packer"
"log" "log"
"os" "os"
"os/exec" "os/exec"
"testing" "testing"
"time" "time"
"github.com/hashicorp/packer/packer"
) )
func helperProcess(s ...string) *exec.Cmd { func helperProcess(s ...string) *exec.Cmd {
@ -48,7 +49,7 @@ func TestHelperProcess(*testing.T) {
os.Exit(2) os.Exit(2)
} }
cmd, args := args[0], args[1:] cmd, _ := args[0], args[1:]
switch cmd { switch cmd {
case "bad-version": case "bad-version":
fmt.Printf("%s1|tcp|:1234\n", APIVersion) fmt.Printf("%s1|tcp|:1234\n", APIVersion)

View File

@ -144,7 +144,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
// todo: remove/reword after the migration // todo: remove/reword after the migration
if p.config.Type == "vagrant.box" { if p.config.Type == "vagrant.box" {
return nil, false, fmt.Errorf("As of June 27th, Vagrant-related functionality has been removed from Terraform\n" + return nil, false, fmt.Errorf("Vagrant-related functionality has been removed from Terraform\n" +
"Enterprise into its own product, Vagrant Cloud. For more information see\n" + "Enterprise into its own product, Vagrant Cloud. For more information see\n" +
"https://www.vagrantup.com/docs/vagrant-cloud/vagrant-cloud-migration.html\n" + "https://www.vagrantup.com/docs/vagrant-cloud/vagrant-cloud-migration.html\n" +
"Please replace the Atlas post-processor with the Vagrant Cloud post-processor,\n" + "Please replace the Atlas post-processor with the Vagrant Cloud post-processor,\n" +

View File

@ -2,9 +2,10 @@ package vagrantcloud
import ( import (
"fmt" "fmt"
"strings"
"github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"strings"
) )
type stepReleaseVersion struct { type stepReleaseVersion struct {
@ -30,7 +31,10 @@ func (s *stepReleaseVersion) Run(state multistep.StateBag) multistep.StepAction
if err != nil || (resp.StatusCode != 200) { if err != nil || (resp.StatusCode != 200) {
cloudErrors := &VagrantCloudErrors{} cloudErrors := &VagrantCloudErrors{}
err = decodeBody(resp, cloudErrors) if err := decodeBody(resp, cloudErrors); err != nil {
state.Put("error", fmt.Errorf("Error parsing provider response: %s", err))
return multistep.ActionHalt
}
if strings.Contains(cloudErrors.FormatErrors(), "already been released") { if strings.Contains(cloudErrors.FormatErrors(), "already been released") {
ui.Message("Not releasing version, already released") ui.Message("Not releasing version, already released")
return multistep.ActionContinue return multistep.ActionContinue

View File

@ -41,6 +41,9 @@ func TestProvisionerPrepare_extractScript(t *testing.T) {
// File contents should contain 2 lines concatenated by newlines: foo\nbar // File contents should contain 2 lines concatenated by newlines: foo\nbar
readFile, err := ioutil.ReadFile(file) readFile, err := ioutil.ReadFile(file)
expectedContents := "foo\nbar\n" expectedContents := "foo\nbar\n"
if err != nil {
t.Fatalf("Should not be error: %s", err)
}
s := string(readFile[:]) s := string(readFile[:])
if s != expectedContents { if s != expectedContents {
t.Fatalf("Expected generated inlineScript to equal '%s', got '%s'", expectedContents, s) t.Fatalf("Expected generated inlineScript to equal '%s', got '%s'", expectedContents, s)

View File

@ -200,6 +200,9 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) {
delete(config, "facter") delete(config, "facter")
p = new(Provisioner) p = new(Provisioner)
err = p.Prepare(config) err = p.Prepare(config)
if err != nil {
t.Fatalf("err: %s", err)
}
if p.config.Facter == nil { if p.config.Facter == nil {
t.Fatalf("err: Default facts are not set in the Puppet provisioner!") t.Fatalf("err: Default facts are not set in the Puppet provisioner!")
} }

View File

@ -1,10 +1,11 @@
package puppetserver package puppetserver
import ( import (
"github.com/hashicorp/packer/packer"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
"github.com/hashicorp/packer/packer"
) )
func testConfig() map[string]interface{} { func testConfig() map[string]interface{} {
@ -167,6 +168,9 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) {
delete(config, "facter") delete(config, "facter")
p = new(Provisioner) p = new(Provisioner)
err = p.Prepare(config) err = p.Prepare(config)
if err != nil {
t.Fatalf("err: %s", err)
}
if p.config.Facter == nil { if p.config.Facter == nil {
t.Fatalf("err: Default facts are not set in the Puppet provisioner!") t.Fatalf("err: Default facts are not set in the Puppet provisioner!")
} }

View File

@ -4,13 +4,14 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"github.com/hashicorp/packer/packer"
"io/ioutil" "io/ioutil"
"log" "log"
"os" "os"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/hashicorp/packer/packer"
) )
func testConfig() map[string]interface{} { func testConfig() map[string]interface{} {
@ -34,6 +35,9 @@ func TestProvisionerPrepare_extractScript(t *testing.T) {
// File contents should contain 2 lines concatenated by newlines: foo\nbar // File contents should contain 2 lines concatenated by newlines: foo\nbar
readFile, err := ioutil.ReadFile(file) readFile, err := ioutil.ReadFile(file)
if err != nil {
t.Fatalf("Should not be error: %s", err)
}
expectedContents := "foo\nbar\n" expectedContents := "foo\nbar\n"
s := string(readFile[:]) s := string(readFile[:])
if s != expectedContents { if s != expectedContents {

View File

@ -10,15 +10,12 @@
"type": "docker", "type": "docker",
"image": "hashicorp/middleman-hashicorp:0.3.28", "image": "hashicorp/middleman-hashicorp:0.3.28",
"discard": "true", "discard": "true",
"run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"] "volumes": {
"{{ pwd }}": "/website"
}
} }
], ],
"provisioners": [ "provisioners": [
{
"type": "file",
"source": ".",
"destination": "/website"
},
{ {
"type": "shell", "type": "shell",
"environment_vars": [ "environment_vars": [
@ -30,7 +27,7 @@
"inline": [ "inline": [
"bundle check || bundle install", "bundle check || bundle install",
"bundle exec middleman build", "bundle exec middleman build",
"/bin/sh ./scripts/deploy.sh" "/bin/bash ./scripts/deploy.sh"
] ]
} }
] ]

55
website/redirects.txt Normal file
View File

@ -0,0 +1,55 @@
#
# REDIRECTS FILE
#
# This is a sample redirect file. Redirects allow individual projects to add
# their own redirect rules in a declarative manner using Fastly edge
# dictionaries.
#
# FORMAT
#
# Redirects are in the format. There must be at least one space between the
# original path and the new path, and there must be exactly two entries per
# line.
#
# /original-path /new-path
#
# GLOB MATCHING
#
# Because of the way lookup tables work, there is no support for glob matching.
# Fastly does not provide a way to iterate through the lookup table, so it is
# not possible to run through the table and find anything that matches. As such
# URLs must match directly.
#
# More complex redirects are possible, but must be added directly to the
# configuration. Please contact the release engineering team for assistance.
#
# DELETING
#
# Deleting items is not supported at this time. To delete an item, contact the
# release engineering team and they will delete the dictionary item.
#
# MISC
#
# - Blank lines are ignored
# - Comments are hash-style
# - URLs are limited to 256 characters
# - Items are case-sensitive (please use all lowercase)
#
/docs/installation.html /docs/install/index.html
/docs/command-line/machine-readable.html /docs/commands/index.html
/docs/command-line/introduction.html /docs/commands/index.html
/docs/templates/introduction.html /docs/templates/index.html
/docs/builders/azure-arm.html /docs/builders/azure.html
/docs/templates/veewee-to-packer.html /guides/veewee-to-packer.html
/docs/extend/developing-plugins.html /docs/extending/plugins.html
/docs/extending/developing-plugins.html /docs/extending/plugins.html
/docs/extend/builder.html /docs/extending/custom-builders.html
/docs/getting-started/setup.html /docs/getting-started/install.html
/docs/other/community.html /downloads-community.html
/community /community.html
/community/index.html /community.html
/docs/other/environmental-variables.html /docs/other/environment-variables.html
/docs/platforms.html /docs/builders/index.html
/intro/platforms.html /docs/builders/index.html
/docs/templates/configuration-templates.html /docs/templates/engine.html

View File

@ -1,9 +1,10 @@
#!/bin/bash #!/usr/bin/env bash
set -e set -e
PROJECT="packer" PROJECT="packer"
PROJECT_URL="www.packer.io" PROJECT_URL="www.packer.io"
FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV" FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV"
FASTLY_DICTIONARY_ID="7CE9Ko06dSFrv8XqDgMZvo"
# Ensure the proper AWS environment variables are set # Ensure the proper AWS environment variables are set
if [ -z "$AWS_ACCESS_KEY_ID" ]; then if [ -z "$AWS_ACCESS_KEY_ID" ]; then
@ -93,7 +94,76 @@ if [ -z "$NO_UPLOAD" ]; then
modify "s3://hc-sites/$PROJECT/latest/" modify "s3://hc-sites/$PROJECT/latest/"
fi fi
# Perform a soft-purge of the surrogate key. # Add redirects if they exist
if [ -z "$NO_REDIRECTS" ] || [ ! test -f "./redirects.txt" ]; then
echo "Adding redirects..."
fields=()
while read -r line; do
[[ "$line" =~ ^#.* ]] && continue
[[ -z "$line" ]] && continue
# Read fields
IFS=" " read -ra parts <<<"$line"
fields+=("${parts[@]}")
done < "./redirects.txt"
# Check we have pairs
if [ $((${#fields[@]} % 2)) -ne 0 ]; then
echo "Bad redirects (not an even number)!"
exit 1
fi
# Check we don't have more than 1000 entries (yes, it says 2000 below, but that
# is because we've split into multiple lines).
if [ "${#fields}" -gt 2000 ]; then
echo "More than 1000 entries!"
exit 1
fi
# Validations
for field in "${fields[@]}"; do
if [ "${#field}" -gt 256 ]; then
echo "'$field' is > 256 characters!"
exit 1
fi
if [ "${field:0:1}" != "/" ]; then
echo "'$field' does not start with /!"
exit 1
fi
done
# Build the payload for single-request updates.
jq_args=()
jq_query="."
for (( i=0; i<${#fields[@]}; i+=2 )); do
original="${fields[i]}"
redirect="${fields[i+1]}"
echo "Redirecting ${original} -> ${redirect}"
jq_args+=(--arg "key$((i/2))" "${original}")
jq_args+=(--arg "value$((i/2))" "${redirect}")
jq_query+="| .items |= (. + [{op: \"upsert\", item_key: \$key$((i/2)), item_value: \$value$((i/2))}])"
done
# Do not post empty items (the API gets sad)
if [ "${#jq_args[@]}" -ne 0 ]; then
json="$(jq "${jq_args[@]}" "${jq_query}" <<<'{"items": []}')"
# Post the JSON body
curl \
--fail \
--silent \
--output /dev/null \
--request "PATCH" \
--header "Fastly-Key: $FASTLY_API_KEY" \
--header "Content-type: application/json" \
--header "Accept: application/json" \
--data "$json"\
"https://api.fastly.com/service/$FASTLY_SERVICE_ID/dictionary/$FASTLY_DICTIONARY_ID/items"
fi
fi
# Perform a purge of the surrogate key.
if [ -z "$NO_PURGE" ]; then if [ -z "$NO_PURGE" ]; then
echo "Purging Fastly cache..." echo "Purging Fastly cache..."
curl \ curl \
@ -118,8 +188,13 @@ if [ -z "$NO_WARM" ]; then
echo "wget --recursive --delete-after https://$PROJECT_URL/" echo "wget --recursive --delete-after https://$PROJECT_URL/"
echo "" echo ""
wget \ wget \
--recursive \
--delete-after \ --delete-after \
--quiet \ --level inf \
--no-directories \
--no-host-directories \
--no-verbose \
--page-requisites \
--recursive \
--spider \
"https://$PROJECT_URL/" "https://$PROJECT_URL/"
fi fi

View File

@ -32,96 +32,3 @@ description: |-
Paid <a href="https://www.hashicorp.com/training.html">HashiCorp training courses</a> Paid <a href="https://www.hashicorp.com/training.html">HashiCorp training courses</a>
are also available in a city near you. Private training courses are also available. are also available in a city near you. Private training courses are also available.
</p> </p>
<h1>People</h1>
<p>
The following people are some of the faces behind Packer. They each
contribute to Packer in some core way. Over time, faces may appear and
disappear from this list as contributors come and go. In addition to
the faces below, Packer is a project by
<a href="https://www.hashicorp.com">HashiCorp</a>, so many HashiCorp
employees actively contribute to Packer.
</p>
<div class="people">
<div class="person">
<img class="pull-left" src="https://www.gravatar.com/avatar/54079122b67de9677c1f93933ce8b63a.png?s=125">
<div class="bio">
<h3>Mitchell Hashimoto (<a href="https://github.com/mitchellh">@mitchellh</a>)</h3>
<p>
Mitchell Hashimoto is the creator of Packer. He developed the
core of Packer as well as the Amazon, VirtualBox, and VMware
builders. In addition to Packer, Mitchell is the creator of
<a href="https://www.vagrantup.com">Vagrant</a>. He is self
described as "automation obsessed."
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://www.gravatar.com/avatar/2acc31dd6370a54b18f6755cd0710ce6.png?s=125">
<div class="bio">
<h3>Jack Pearkes (<a href="https://github.com/pearkes">@pearkes</a>)</h3>
<p>
<a href="http://jack.ly/">Jack Pearkes</a> created and maintains the DigitalOcean builder
for Packer. Outside of Packer, Jack is an avid open source
contributor and software consultant.
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://www.gravatar.com/avatar/2f7fc9cb7558e3ea48f5a86fa90a78da.png?s=125">
<div class="bio">
<h3>Mark Peek (<a href="https://github.com/markpeek">@markpeek</a>)</h3>
<p>
In addition to Packer, Mark Peek helps maintain
various open source projects such as
<a href="https://github.com/cloudtools">cloudtools</a> and
<a href="https://github.com/ironport">IronPort Python libraries</a>.
Mark is also a <a href="https://FreeBSD.org">FreeBSD committer</a>.
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://www.gravatar.com/avatar/1fca64df3d7db1e2f258a8956d2b0aff.png?s=125">
<div class="bio">
<h3>Ross Smith II (<a href="https://github.com/rasa" target="_blank" rel="nofollow noopener noreferrer">@rasa</a>)</h3>
<p>
<a href="http://smithii.com/" target="_blank" rel="nofollow noopener noreferrer">Ross Smith</a> maintains our
VMware builder on Windows, and provides other valuable assistance. Ross is an
open source enthusiast, published author, and freelance consultant.
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://www.gravatar.com/avatar/c9f6bf7b5b865012be5eded656ebed7d.png?s=125">
<div class="bio">
<h3>Rickard von Essen<br/>(<a href="https://github.com/rickard-von-essen" target="_blank" rel="nofollow noopener noreferrer">@rickard-von-essen</a>)</h3>
<p>
Rickard von Essen maintains our Parallels Desktop builder. Rickard is an
polyglot programmer and consults on Continuous Delivery.
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://secure.gravatar.com/avatar/f1695dcf6a21f90f5db84b2eee2cbdbe?s=125">
<div class="bio">
<h3>Matt Hooker (<a href="https://github.com/mwhooker" target="_blank" rel="nofollow noopener noreferrer">@mwhooker</a>)</h3>
<p><a href="https://twitter.com/mwhooker" target="_blank" rel="nofollow
noopener noreferrer">Matt</a> maintains Packer for HashiCorp. After
picking up Chef for a job, he decided that continually provisioning the
same machine was bound for trouble. Luckily Packer had just been created,
and was the answer to his prayers. Now he works on it professionally, and
couldn't be happier.
</p>
</div>
</div>
<div class="person">
<img class="pull-left" src="https://secure.gravatar.com/avatar/858a1ad10ef732e5e309460fb643cc23?s=125">
<div class="bio">
<h3>Megan Marsh (<a href="https://github.com/swampdragons" target="_blank" rel="nofollow noopener noreferrer">@swampdragons</a>)</h3>
<p><a href="https://twitter.com/swampdragons" target="_blank" rel="nofollow
noopener noreferrer">Megan</a> maintains Packer for HashiCorp; in her past life she used Packer and Vagrant in her work as a cloud infrastructure developer.
</p>
</div>
</div>
<div class="clearfix">
</div>
</div>

View File

@ -281,7 +281,7 @@ the credentials provided in the builder config's `account_file`.
## Gotchas ## Gotchas
Centos and recent Debian images have root ssh access disabled by default. Set `ssh_username` to CentOS and recent Debian images have root ssh access disabled by default. Set `ssh_username` to
any user, which will be created by packer with sudo access. any user, which will be created by packer with sudo access.
The machine type must have a scratch disk, which means you can't use an The machine type must have a scratch disk, which means you can't use an

View File

@ -85,9 +85,6 @@ builder.
and "other". This can be omitted only if `parallels_tools_mode` and "other". This can be omitted only if `parallels_tools_mode`
is "disable". is "disable".
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -62,9 +62,6 @@ builder.
- `source_path` (string) - The path to a PVM directory that acts as the source - `source_path` (string) - The path to a PVM directory that acts as the source
of this build. of this build.
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -109,9 +109,6 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
boot directly from it. When passing a path to an IMG or QCOW2 file, you boot directly from it. When passing a path to an IMG or QCOW2 file, you
should set `disk_image` to "true". should set `disk_image` to "true".
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `accelerator` (string) - The accelerator type to use when running the VM. - `accelerator` (string) - The accelerator type to use when running the VM.

View File

@ -77,12 +77,6 @@ builder.
This URL can be either an HTTP URL or a file URL (or path to a file). If This URL can be either an HTTP URL or a file URL (or path to a file). If
this is an HTTP URL, Packer will download it and cache it between runs. this is an HTTP URL, Packer will download it and cache it between runs.
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
- `ssh_password` (string) - The password to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -65,9 +65,6 @@ builder.
- `source_path` (string) - The path to an OVF or OVA file that acts as the - `source_path` (string) - The path to an OVF or OVA file that acts as the
source of this build. It can also be a URL. source of this build. It can also be a URL.
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -42,6 +42,7 @@ self-install. Still, the example serves to show the basic configuration:
"iso_checksum": "af5f788aee1b32c4b2634734309cc9e9", "iso_checksum": "af5f788aee1b32c4b2634734309cc9e9",
"iso_checksum_type": "md5", "iso_checksum_type": "md5",
"ssh_username": "packer", "ssh_username": "packer",
"ssh_password": "packer",
"shutdown_command": "shutdown -P now" "shutdown_command": "shutdown -P now"
} }
``` ```
@ -80,9 +81,6 @@ builder.
This URL can be either an HTTP URL or a file URL (or path to a file). If This URL can be either an HTTP URL or a file URL (or path to a file). If
this is an HTTP URL, Packer will download it and cache it between runs. this is an HTTP URL, Packer will download it and cache it between runs.
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -56,9 +56,6 @@ builder.
- `source_path` (string) - Path to the source VMX file to clone. - `source_path` (string) - Path to the source VMX file to clone.
- `ssh_username` (string) - The username to use to SSH into the machine once
the OS is installed.
### Optional: ### Optional:
- `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type

View File

@ -4,9 +4,10 @@ description: |
various builders and imports it to an Alicloud customized image list. various builders and imports it to an Alicloud customized image list.
layout: docs layout: docs
page_title: 'Alicloud Import Post-Processor' page_title: 'Alicloud Import Post-Processor'
sidebar_current: 'docs-post-processors-alicloud-import'
--- ---
# Aicloud Import Post-Processor # Alicloud Import Post-Processor
Type: `alicloud-import` Type: `alicloud-import`

View File

@ -36,9 +36,6 @@ Required:
- `password` (string) - Password to use to authenticate to the - `password` (string) - Password to use to authenticate to the
vSphere endpoint. vSphere endpoint.
- `resource_pool` (string) - The resource pool to upload the VM to. This is
*not required*.
- `username` (string) - The username to use to authenticate to the - `username` (string) - The username to use to authenticate to the
vSphere endpoint. vSphere endpoint.
@ -52,6 +49,8 @@ Optional:
- `insecure` (boolean) - Whether or not the connection to vSphere can be done - `insecure` (boolean) - Whether or not the connection to vSphere can be done
over an insecure connection. By default this is false. over an insecure connection. By default this is false.
- `resource_pool` (string) - The resource pool to upload the VM to.
- `vm_folder` (string) - The folder within the datastore to store the VM. - `vm_folder` (string) - The folder within the datastore to store the VM.
- `vm_network` (string) - The name of the VM network this VM will be - `vm_network` (string) - The name of the VM network this VM will be

View File

@ -28,7 +28,13 @@ The example below is fully functional.
## Configuration Reference ## Configuration Reference
The reference of available configuration options is listed below. The only The reference of available configuration options is listed below. The only
required argument is the path to your local salt state tree. required element is "local_state_tree".
Required:
- `local_state_tree` (string) - The path to your local [state
tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree).
This will be uploaded to the `remote_state_tree` on the remote.
Optional: Optional:
@ -54,10 +60,6 @@ Optional:
roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration).
This will be uploaded to the `remote_pillar_roots` on the remote. This will be uploaded to the `remote_pillar_roots` on the remote.
- `local_state_tree` (string) - The path to your local [state
tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree).
This will be uploaded to the `remote_state_tree` on the remote.
- `custom_state` (string) - A state to be run instead of `state.highstate`. - `custom_state` (string) - A state to be run instead of `state.highstate`.
Defaults to `state.highstate` if unspecified. Defaults to `state.highstate` if unspecified.

View File

@ -67,8 +67,7 @@ The SSH communicator has the following options:
- `ssh_bastion_password` (string) - The password to use to authenticate - `ssh_bastion_password` (string) - The password to use to authenticate
with the bastion host. with the bastion host.
- `ssh_bastion_port` (integer) - The port of the bastion host. Defaults to - `ssh_bastion_port` (integer) - The port of the bastion host. Defaults to 1.
1.
- `ssh_bastion_private_key_file` (string) - A private key file to use - `ssh_bastion_private_key_file` (string) - A private key file to use
to authenticate with the bastion host. to authenticate with the bastion host.

File diff suppressed because it is too large Load Diff