Merge branch 'master' into ovfexportpath-localoutputdir

commit 9eb9abdce9
@@ -7,7 +7,7 @@ language: go

 go:
 - 1.7.4
-- 1.8beta1
+- 1.8

 install:
 - make deps
CHANGELOG.md
@@ -1,6 +1,36 @@
 ## (Unreleased)

-* builder/docker: create export dir if needed [GH-4439]
+### FEATURES:
+
+* **New builder:** `ebs-surrogate` for building AMIs from EBS volumes. [GH-4351]
+
+### IMPROVEMENTS:
+
+* builder/amazon: Add BuildRegion and SourceAMI template variables. [GH-4399]
+* builder/amazon: Change EC2 Windows password timeout to 20 minutes. [GH-4590]
+* builder/amazon-chroot: support encrypted boot volume. [GH-4584]
+* builder/docker: create export dir if needed. [GH-4439]
+* builder/googlecompute: Add `on_host_maintenance` option. [GH-4544]
+* builder/openstack: add reuse_ips option to try to re-use existing IPs. [GH-4564]
+* communicator/docker: preserve file mode. [GH-4443]
+* communicator/winrm: support ProxyFromEnvironment. [GH-4463]
+* core: make VNC links clickable in terminal. [GH-4497] [GH-4498]
+* post-processor/amazon-import: support AMI attributes on import [GH-4216]
+* provisioner/ansible: use randomized staging dir [GH-4472]
+* communicator/ssh: Use SSH agent when enabled for bastion step. [GH-4598]
+* builder/amazon: enable ena when `enhanced_networking` is set. [GH-4578]
+
+### BUG FIXES:
+
+* builder/amazon: Fix ssh agent authentication. [GH-4597]
+* builder/amazon-ebsvolume: Fix interpolation of block_device. [GH-4464]
+* builder/googlecompute: fix bug when creating image from custom image_family.
+  [GH-4518]
+* builder/virtualbox: remove guest additions before saving image. [GH-4496]
+* builder/vmware: ESXi: VNC port timeout increased to 5 s. [GH-4480]
+* core: always check for an error first when walking a path. [GH-4467]
+* builder/docker: Don't force tag if using a docker version that doesn't
+  support it. [GH-4560]

 ## 0.12.2 (January 20, 2017)


@@ -33,6 +63,7 @@
 * provisioner/powershell: Allow equals sign in value of environment
   variables. [GH-4328]
 * provisioner/puppet-server: Add default facts. [GH-4286]
+* builder/qemu: Detect input disk image format during copy/convert. [GH-4343]

 ### BUG FIXES:

@@ -100,6 +100,18 @@ From there, open your fork in your browser to open a new pull-request.

 ### Tips for Working on Packer

+#### Working on forks
+
+The easiest way to work on a fork is to set it as a remote of the packer project. After following the steps in "Setting up Go to work on Packer":
+
+1. Navigate to $GOPATH/src/github.com/mitchellh/packer
+2. Add the remote `git remote add <name of remote> <github url of fork>`. For example `git remote add mwhooker https://github.com/mwhooker/packer.git`.
+3. Checkout a feature branch: `git checkout -b new-feature`
+4. Make changes
+5. (Optional) Push your changes to the fork: `git push -u <name of remote> new-feature`
+
+This way you can push to your fork to create a PR, but the code on disk still lives in the spot where the go cli tools are expecting to find it.
+
 #### Govendor

 If you are submitting a change that requires new or updated dependencies, please include them in `vendor/vendor.json` and in the `vendor/` folder. This helps everything get tested properly in CI.
@@ -73,8 +73,8 @@ file as `quick-start.json`. Export your AWS credentials as the
 "access_key": "{{user `access_key`}}",
 "secret_key": "{{user `secret_key`}}",
 "region": "us-east-1",
-"source_ami": "ami-de0d9eb7",
-"instance_type": "t1.micro",
+"source_ami": "ami-af22d9b9",
+"instance_type": "t2.micro",
 "ssh_username": "ubuntu",
 "ami_name": "packer-example {{timestamp}}"
 }]
@@ -64,6 +64,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 InterpolateContext: &b.config.ctx,
 InterpolateFilter: &interpolate.RenderFilter{
 Exclude: []string{
+"ami_description",
+"snapshot_tags",
+"tags",
 "command_wrapper",
 "post_mount_commands",
 "pre_mount_commands",

@@ -251,6 +254,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 &StepRegisterAMI{
 RootVolumeSize: b.config.RootVolumeSize,
 },
+&awscommon.StepCreateEncryptedAMICopy{
+KeyID: b.config.AMIKmsKeyId,
+EncryptBootVolume: b.config.AMIEncryptBootVolume,
+Name: b.config.AMIName,
+},
 &awscommon.StepAMIRegionCopy{
 AccessConfig: &b.config.AccessConfig,
 Regions: b.config.AMIRegions,

@@ -263,10 +271,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 ProductCodes: b.config.AMIProductCodes,
 SnapshotUsers: b.config.SnapshotUsers,
 SnapshotGroups: b.config.SnapshotGroups,
+Ctx: b.config.ctx,
 },
 &awscommon.StepCreateTags{
 Tags: b.config.AMITags,
 SnapshotTags: b.config.SnapshotTags,
+Ctx: b.config.ctx,
 },
 )

@@ -75,9 +75,14 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
 registerOpts = buildRegisterOpts(config, image, newMappings)
 }

-// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
 if config.AMIEnhancedNetworking {
+// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
+// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
 registerOpts.SriovNetSupport = aws.String("simple")
+
+// Set EnaSupport to true
+// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
+registerOpts.EnaSupport = aws.Bool(true)
 }

 registerResp, err := ec2conn.RegisterImage(registerOpts)
@@ -0,0 +1,6 @@
+package common
+
+type BuildInfoTemplate struct {
+SourceAMI string
+BuildRegion string
+}
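The new `BuildInfoTemplate` above is what the amazon builders hang on the interpolation context so that tag values and AMI descriptions can reference `{{ .SourceAMI }}` and `{{ .BuildRegion }}`. A minimal, self-contained sketch of that flow (not part of this diff; the example values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

// Local mirror of the struct added in builder/amazon/common above.
type BuildInfoTemplate struct {
	SourceAMI   string
	BuildRegion string
}

func main() {
	ctx := interpolate.Context{}
	// The builder steps set Data right before rendering a user-supplied value.
	ctx.Data = &BuildInfoTemplate{
		SourceAMI:   "ami-af22d9b9",
		BuildRegion: "us-east-1",
	}

	rendered, err := interpolate.Render("built from {{ .SourceAMI }} in {{ .BuildRegion }}", &ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(rendered) // built from ami-af22d9b9 in us-east-1
}
```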
@@ -66,7 +66,7 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
 }

 if c.WindowsPasswordTimeout == 0 {
-c.WindowsPasswordTimeout = 10 * time.Minute
+c.WindowsPasswordTimeout = 20 * time.Minute
 }

 if c.RunTags == nil {
@@ -11,11 +11,13 @@ import (
 "github.com/mitchellh/multistep"
 retry "github.com/mitchellh/packer/common"
 "github.com/mitchellh/packer/packer"
+"github.com/mitchellh/packer/template/interpolate"
 )

 type StepCreateTags struct {
 Tags map[string]string
 SnapshotTags map[string]string
+Ctx interpolate.Context
 }

 func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {

@@ -23,6 +25,13 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
 ui := state.Get("ui").(packer.Ui)
 amis := state.Get("amis").(map[string]string)

+var sourceAMI string
+if rawSourceAMI, hasSourceAMI := state.GetOk("source_image"); hasSourceAMI {
+sourceAMI = *rawSourceAMI.(*ec2.Image).ImageId
+} else {
+sourceAMI = ""
+}
+
 if len(s.Tags) == 0 && len(s.SnapshotTags) == 0 {
 return multistep.ActionContinue
 }

@@ -79,9 +88,20 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {

 // Convert tags to ec2.Tag format
 ui.Say("Creating AMI tags")
-amiTags := ConvertToEC2Tags(s.Tags)
+amiTags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, sourceAMI, s.Ctx)
+if err != nil {
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
 ui.Say("Creating snapshot tags")
-snapshotTags := ConvertToEC2Tags(s.SnapshotTags)
+snapshotTags, err := ConvertToEC2Tags(s.SnapshotTags, *ec2conn.Config.Region, sourceAMI, s.Ctx)
+if err != nil {
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}

 // Retry creating tags for about 2.5 minutes
 err = retry.Retry(0.2, 30, 11, func() (bool, error) {

@@ -130,14 +150,25 @@ func (s *StepCreateTags) Cleanup(state multistep.StateBag) {
 // No cleanup...
 }

-func ConvertToEC2Tags(tags map[string]string) []*ec2.Tag {
-var ec2tags []*ec2.Tag
+func ConvertToEC2Tags(tags map[string]string, region, sourceAmiId string, ctx interpolate.Context) ([]*ec2.Tag, error) {
+var ec2Tags []*ec2.Tag
 for key, value := range tags {
-log.Printf("[DEBUG] Creating tag %s=%s", key, value)
-ec2tags = append(ec2tags, &ec2.Tag{
+ctx.Data = &BuildInfoTemplate{
+SourceAMI: sourceAmiId,
+BuildRegion: region,
+}
+interpolatedValue, err := interpolate.Render(value, &ctx)
+if err != nil {
+return ec2Tags, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
+}
+
+log.Printf("Adding tag: \"%s\": \"%s\"", key, interpolatedValue)
+ec2Tags = append(ec2Tags, &ec2.Tag{
 Key: aws.String(key),
-Value: aws.String(value),
+Value: aws.String(interpolatedValue),
 })
 }
-return ec2tags
+
+return ec2Tags, nil
 }
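Caller side of the new `ConvertToEC2Tags` signature: callers now supply the region, the source AMI id, and an `interpolate.Context`, and must handle the returned error. A hedged, self-contained sketch (not part of this diff; tag values are illustrative):

```go
package main

import (
	"fmt"

	awscommon "github.com/mitchellh/packer/builder/amazon/common"
	"github.com/mitchellh/packer/template/interpolate"
)

func main() {
	tags := map[string]string{
		"Name":      "packer-build {{ .BuildRegion }}",
		"BuiltFrom": "{{ .SourceAMI }}",
	}

	// Region and source AMI id feed the BuildInfoTemplate used during rendering.
	ec2Tags, err := awscommon.ConvertToEC2Tags(tags, "us-east-1", "ami-af22d9b9", interpolate.Context{})
	if err != nil {
		// A bad template in a tag value surfaces here instead of being sent to EC2.
		panic(err)
	}

	for _, tag := range ec2Tags {
		fmt.Printf("%s=%s\n", *tag.Key, *tag.Value)
	}
}
```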
@@ -1,4 +1,4 @@
-package ebs
+package common

 import (
 "fmt"

@@ -7,22 +7,23 @@ import (
 "github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/service/ec2"
 "github.com/mitchellh/multistep"
-awscommon "github.com/mitchellh/packer/builder/amazon/common"
 "github.com/mitchellh/packer/packer"
 )

-type stepCreateEncryptedAMICopy struct {
+type StepCreateEncryptedAMICopy struct {
 image *ec2.Image
+KeyID string
+EncryptBootVolume bool
+Name string
 }

-func (s *stepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.StepAction {
-config := state.Get("config").(Config)
+func (s *StepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.StepAction {
 ec2conn := state.Get("ec2").(*ec2.EC2)
 ui := state.Get("ui").(packer.Ui)
-kmsKeyId := config.AMIConfig.AMIKmsKeyId
+kmsKeyId := s.KeyID

 // Encrypt boot not set, so skip step
-if !config.AMIConfig.AMIEncryptBootVolume {
+if !s.EncryptBootVolume {
 if kmsKeyId != "" {
 log.Printf(fmt.Sprintf("Ignoring KMS Key ID: %s, encrypted=false", kmsKeyId))
 }

@@ -46,7 +47,7 @@ func (s *stepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.Ste
 }

 copyOpts := &ec2.CopyImageInput{
-Name: &config.AMIName, // Try to overwrite existing AMI
+Name: &s.Name, // Try to overwrite existing AMI
 SourceImageId: aws.String(id),
 SourceRegion: aws.String(region),
 Encrypted: aws.Bool(true),

@@ -62,15 +63,15 @@ func (s *stepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.Ste
 }

 // Wait for the copy to become ready
-stateChange := awscommon.StateChangeConf{
+stateChange := StateChangeConf{
 Pending: []string{"pending"},
 Target: "available",
-Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *copyResp.ImageId),
+Refresh: AMIStateRefreshFunc(ec2conn, *copyResp.ImageId),
 StepState: state,
 }

 ui.Say("Waiting for AMI copy to become ready...")
-if _, err := awscommon.WaitForState(&stateChange); err != nil {
+if _, err := WaitForState(&stateChange); err != nil {
 err := fmt.Errorf("Error waiting for AMI Copy: %s", err)
 state.Put("error", err)
 ui.Error(err.Error())

@@ -146,7 +147,7 @@ func (s *stepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.Ste
 return multistep.ActionContinue
 }

-func (s *stepCreateEncryptedAMICopy) Cleanup(state multistep.StateBag) {
+func (s *StepCreateEncryptedAMICopy) Cleanup(state multistep.StateBag) {
 if s.image == nil {
 return
 }
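With the step now exported and configured through fields (rather than pulling the whole builder `Config` out of the state bag), each builder wires it explicitly. A short fragment mirroring the chroot and EBS builder changes later in this diff:

```go
// Fragment: inside a builder's Run(), alongside the other multistep.Steps.
&awscommon.StepCreateEncryptedAMICopy{
	KeyID:             b.config.AMIKmsKeyId,
	EncryptBootVolume: b.config.AMIEncryptBootVolume,
	Name:              b.config.AMIName,
},
```

The step itself skips all work when `EncryptBootVolume` is false, logging and ignoring any KMS key id, as shown in the Run body above.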
@@ -107,7 +107,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
 // If no key name is set, then we never created it, so just return
 // If we used an SSH private key file, do not go about deleting
 // keypairs
-if s.PrivateKeyFile != "" || s.KeyPairName != "" {
+if s.PrivateKeyFile != "" || (s.KeyPairName == "" && s.keyName == "") {
 return
 }

@@ -8,6 +8,7 @@ import (
 "github.com/aws/aws-sdk-go/service/ec2"
 "github.com/mitchellh/multistep"
 "github.com/mitchellh/packer/packer"
+"github.com/mitchellh/packer/template/interpolate"
 )

 type StepModifyAMIAttributes struct {

@@ -17,12 +18,20 @@ type StepModifyAMIAttributes struct {
 SnapshotGroups []string
 ProductCodes []string
 Description string
+Ctx interpolate.Context
 }

 func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAction {
 ec2conn := state.Get("ec2").(*ec2.EC2)
 ui := state.Get("ui").(packer.Ui)
 amis := state.Get("amis").(map[string]string)
+
+var sourceAMI string
+if rawSourceAMI, hasSourceAMI := state.GetOk("source_image"); hasSourceAMI {
+sourceAMI = *rawSourceAMI.(*ec2.Image).ImageId
+} else {
+sourceAMI = ""
+}
 snapshots := state.Get("snapshots").(map[string][]string)

 // Determine if there is any work to do.

@@ -38,6 +47,18 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc
 return multistep.ActionContinue
 }

+var err error
+s.Ctx.Data = &BuildInfoTemplate{
+SourceAMI: sourceAMI,
+BuildRegion: *ec2conn.Config.Region,
+}
+s.Description, err = interpolate.Render(s.Description, &s.Ctx)
+if err != nil {
+err = fmt.Errorf("Error interpolating AMI description: %s", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
 // Construct the modify image and snapshot attribute requests we're going
 // to make. We need to make each separately since the EC2 API only allows
 // changing one type at a kind currently.
@@ -3,6 +3,7 @@ package common
 import (
 "fmt"

+"github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/service/ec2"
 "github.com/mitchellh/multistep"
 "github.com/mitchellh/packer/packer"

@@ -17,16 +18,31 @@ func (s *StepModifyEBSBackedInstance) Run(state multistep.StateBag) multistep.St
 instance := state.Get("instance").(*ec2.Instance)
 ui := state.Get("ui").(packer.Ui)

-// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
 if s.EnableEnhancedNetworking {
-ui.Say("Enabling Enhanced Networking...")
+// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
+// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
+ui.Say("Enabling Enhanced Networking (SR-IOV)...")
 simple := "simple"
 _, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
 InstanceId: instance.InstanceId,
 SriovNetSupport: &ec2.AttributeValue{Value: &simple},
 })
 if err != nil {
-err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", *instance.InstanceId, err)
+err := fmt.Errorf("Error enabling Enhanced Networking (SR-IOV) on %s: %s", *instance.InstanceId, err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
+// Set EnaSupport to true.
+// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
+ui.Say("Enabling Enhanced Networking (ENA)...")
+_, err = ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
+InstanceId: instance.InstanceId,
+EnaSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+})
+if err != nil {
+err := fmt.Errorf("Error enabling Enhanced Networking (ENA) on %s: %s", *instance.InstanceId, err)
 state.Put("error", err)
 ui.Error(err.Error())
 return multistep.ActionHalt
@@ -13,6 +13,7 @@ import (

 "github.com/mitchellh/multistep"
 "github.com/mitchellh/packer/packer"
+"github.com/mitchellh/packer/template/interpolate"
 )

 type StepRunSourceInstance struct {

@@ -32,6 +33,7 @@ type StepRunSourceInstance struct {
 Tags map[string]string
 UserData string
 UserDataFile string
+Ctx interpolate.Context

 instanceId string
 spotRequest *ec2.SpotInstanceRequest

@@ -39,7 +41,10 @@ type StepRunSourceInstance struct {

 func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {
 ec2conn := state.Get("ec2").(*ec2.EC2)
-keyName := state.Get("keyPair").(string)
+var keyName string
+if name, ok := state.GetOk("keyPair"); ok {
+keyName = name.(string)
+}
 securityGroupIds := aws.StringSlice(state.Get("securityGroupIds").([]string))
 ui := state.Get("ui").(packer.Ui)

@@ -275,7 +280,14 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 if _, exists := s.Tags["Name"]; !exists {
 s.Tags["Name"] = "Packer Builder"
 }
-ec2Tags := ConvertToEC2Tags(s.Tags)
+
+ec2Tags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx)
+if err != nil {
+err := fmt.Errorf("Error tagging source instance: %s", err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}

 _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
 Tags: ec2Tags,
@@ -101,7 +101,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {

 ui.Message(fmt.Sprintf("Found Image ID: %s", *image.ImageId))

-// Enhanced Networking (SriovNetSupport) can only be enabled on HVM AMIs.
+// Enhanced Networking can only be enabled on HVM AMIs.
 // See http://goo.gl/icuXh5
 if s.EnhancedNetworking && *image.VirtualizationType != "hvm" {
 err := fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi)
@@ -44,6 +44,15 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 err := config.Decode(&b.config, &config.DecodeOpts{
 Interpolate: true,
 InterpolateContext: &b.config.ctx,
+InterpolateFilter: &interpolate.RenderFilter{
+Exclude: []string{
+"ami_description",
+"run_tags",
+"run_volume_tags",
+"snapshot_tags",
+"tags",
+},
+},
 }, raws...)
 if err != nil {
 return nil, err

@@ -137,10 +146,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 AvailabilityZone: b.config.AvailabilityZone,
 BlockDevices: b.config.BlockDevices,
 Tags: b.config.RunTags,
+Ctx: b.config.ctx,
 InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
 },
 &stepTagEBSVolumes{
 VolumeRunTags: b.config.VolumeRunTags,
+Ctx: b.config.ctx,
 },
 &awscommon.StepGetPassword{
 Debug: b.config.PackerDebug,

@@ -171,7 +182,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 AMIName: b.config.AMIName,
 },
 &stepCreateAMI{},
-&stepCreateEncryptedAMICopy{},
+&awscommon.StepCreateEncryptedAMICopy{
+KeyID: b.config.AMIKmsKeyId,
+EncryptBootVolume: b.config.AMIEncryptBootVolume,
+Name: b.config.AMIName,
+},
 &awscommon.StepAMIRegionCopy{
 AccessConfig: &b.config.AccessConfig,
 Regions: b.config.AMIRegions,

@@ -184,10 +199,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 ProductCodes: b.config.AMIProductCodes,
 SnapshotUsers: b.config.SnapshotUsers,
 SnapshotGroups: b.config.SnapshotGroups,
+Ctx: b.config.ctx,
 },
 &awscommon.StepCreateTags{
 Tags: b.config.AMITags,
 SnapshotTags: b.config.SnapshotTags,
+Ctx: b.config.ctx,
 },
 }

@@ -7,15 +7,18 @@ import (
 "github.com/mitchellh/multistep"
 "github.com/mitchellh/packer/builder/amazon/common"
 "github.com/mitchellh/packer/packer"
+"github.com/mitchellh/packer/template/interpolate"
 )

 type stepTagEBSVolumes struct {
 VolumeRunTags map[string]string
+Ctx interpolate.Context
 }

 func (s *stepTagEBSVolumes) Run(state multistep.StateBag) multistep.StepAction {
 ec2conn := state.Get("ec2").(*ec2.EC2)
 instance := state.Get("instance").(*ec2.Instance)
+sourceAMI := state.Get("source_image").(*ec2.Image)
 ui := state.Get("ui").(packer.Ui)

 if len(s.VolumeRunTags) == 0 {

@@ -34,9 +37,15 @@ func (s *stepTagEBSVolumes) Run(state multistep.StateBag) multistep.StepAction {
 }

 ui.Say("Adding tags to source EBS Volumes")
-tags := common.ConvertToEC2Tags(s.VolumeRunTags)
+tags, err := common.ConvertToEC2Tags(s.VolumeRunTags, *ec2conn.Config.Region, *sourceAMI.ImageId, s.Ctx)
+if err != nil {
+err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}

-_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
+_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
 Resources: volumeIds,
 Tags: tags,
 })
@@ -0,0 +1,218 @@
+// The ebssurrogate package contains a packer.Builder implementation that
+// builds a new EBS-backed AMI using an ephemeral instance.
+package ebssurrogate
+
+import (
+"errors"
+"fmt"
+"log"
+
+"github.com/aws/aws-sdk-go/aws/session"
+"github.com/aws/aws-sdk-go/service/ec2"
+"github.com/hashicorp/errwrap"
+"github.com/mitchellh/multistep"
+awscommon "github.com/mitchellh/packer/builder/amazon/common"
+"github.com/mitchellh/packer/common"
+"github.com/mitchellh/packer/helper/communicator"
+"github.com/mitchellh/packer/helper/config"
+"github.com/mitchellh/packer/packer"
+"github.com/mitchellh/packer/template/interpolate"
+)
+
+const BuilderId = "mitchellh.amazon.ebssurrogate"
+
+type Config struct {
+common.PackerConfig `mapstructure:",squash"`
+awscommon.AccessConfig `mapstructure:",squash"`
+awscommon.RunConfig `mapstructure:",squash"`
+awscommon.BlockDevices `mapstructure:",squash"`
+awscommon.AMIConfig `mapstructure:",squash"`
+
+RootDevice RootBlockDevice `mapstructure:"ami_root_device"`
+
+ctx interpolate.Context
+}
+
+type Builder struct {
+config Config
+runner multistep.Runner
+}
+
+func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
+b.config.ctx.Funcs = awscommon.TemplateFuncs
+err := config.Decode(&b.config, &config.DecodeOpts{
+Interpolate: true,
+InterpolateContext: &b.config.ctx,
+InterpolateFilter: &interpolate.RenderFilter{
+Exclude: []string{
+"ami_description",
+"run_tags",
+"tags",
+},
+},
+}, raws...)
+if err != nil {
+return nil, err
+}
+
+// Accumulate any errors
+var errs *packer.MultiError
+errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
+errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
+errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
+errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)
+errs = packer.MultiErrorAppend(errs, b.config.RootDevice.Prepare(&b.config.ctx)...)
+
+if b.config.AMIVirtType == "" {
+errs = packer.MultiErrorAppend(errs, errors.New("ami_virtualization_type is required."))
+}
+
+foundRootVolume := false
+for _, launchDevice := range b.config.BlockDevices.LaunchMappings {
+if launchDevice.DeviceName == b.config.RootDevice.SourceDeviceName {
+foundRootVolume = true
+}
+}
+
+if !foundRootVolume {
+errs = packer.MultiErrorAppend(errs, fmt.Errorf("no volume with name '%s' is found", b.config.RootDevice.SourceDeviceName))
+}
+
+if errs != nil && len(errs.Errors) > 0 {
+return nil, errs
+}
+
+log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))
+return nil, nil
+}
+
+func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
+awsConfig, err := b.config.Config()
+if err != nil {
+return nil, err
+}
+
+awsSession, err := session.NewSession(awsConfig)
+if err != nil {
+return nil, errwrap.Wrapf("Error creating AWS Session: {{err}}", err)
+}
+
+ec2conn := ec2.New(awsSession)
+
+// If the subnet is specified but not the AZ, try to determine the AZ automatically
+if b.config.SubnetId != "" && b.config.AvailabilityZone == "" {
+log.Printf("[INFO] Finding AZ for the given subnet '%s'", b.config.SubnetId)
+resp, err := ec2conn.DescribeSubnets(&ec2.DescribeSubnetsInput{SubnetIds: []*string{&b.config.SubnetId}})
+if err != nil {
+return nil, err
+}
+b.config.AvailabilityZone = *resp.Subnets[0].AvailabilityZone
+log.Printf("[INFO] AZ found: '%s'", b.config.AvailabilityZone)
+}
+
+// Setup the state bag and initial state for the steps
+state := new(multistep.BasicStateBag)
+state.Put("config", &b.config)
+state.Put("ec2", ec2conn)
+state.Put("hook", hook)
+state.Put("ui", ui)
+
+// Build the steps
+steps := []multistep.Step{
+&awscommon.StepSourceAMIInfo{
+SourceAmi: b.config.SourceAmi,
+EnhancedNetworking: b.config.AMIEnhancedNetworking,
+AmiFilters: b.config.SourceAmiFilter,
+},
+&awscommon.StepKeyPair{
+Debug: b.config.PackerDebug,
+SSHAgentAuth: b.config.Comm.SSHAgentAuth,
+DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
+KeyPairName: b.config.SSHKeyPairName,
+TemporaryKeyPairName: b.config.TemporaryKeyPairName,
+PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
+},
+&awscommon.StepSecurityGroup{
+SecurityGroupIds: b.config.SecurityGroupIds,
+CommConfig: &b.config.RunConfig.Comm,
+VpcId: b.config.VpcId,
+},
+&awscommon.StepRunSourceInstance{
+Debug: b.config.PackerDebug,
+ExpectedRootDevice: "ebs",
+SpotPrice: b.config.SpotPrice,
+SpotPriceProduct: b.config.SpotPriceAutoProduct,
+InstanceType: b.config.InstanceType,
+UserData: b.config.UserData,
+UserDataFile: b.config.UserDataFile,
+SourceAMI: b.config.SourceAmi,
+IamInstanceProfile: b.config.IamInstanceProfile,
+SubnetId: b.config.SubnetId,
+AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
+EbsOptimized: b.config.EbsOptimized,
+AvailabilityZone: b.config.AvailabilityZone,
+BlockDevices: b.config.BlockDevices,
+Tags: b.config.RunTags,
+InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
+},
+&awscommon.StepGetPassword{
+Debug: b.config.PackerDebug,
+Comm: &b.config.RunConfig.Comm,
+Timeout: b.config.WindowsPasswordTimeout,
+},
+&communicator.StepConnect{
+Config: &b.config.RunConfig.Comm,
+Host: awscommon.SSHHost(
+ec2conn,
+b.config.SSHPrivateIp),
+SSHConfig: awscommon.SSHConfig(
+b.config.RunConfig.Comm.SSHAgentAuth,
+b.config.RunConfig.Comm.SSHUsername,
+b.config.RunConfig.Comm.SSHPassword),
+},
+&common.StepProvision{},
+&awscommon.StepStopEBSBackedInstance{
+SpotPrice: b.config.SpotPrice,
+DisableStopInstance: b.config.DisableStopInstance,
+},
+&awscommon.StepModifyEBSBackedInstance{
+EnableEnhancedNetworking: b.config.AMIEnhancedNetworking,
+},
+&StepSnapshotNewRootVolume{
+NewRootMountPoint: b.config.RootDevice.SourceDeviceName,
+},
+&StepRegisterAMI{
+RootDevice: b.config.RootDevice,
+BlockDevices: b.config.BlockDevices.BuildLaunchDevices(),
+},
+}
+
+// Run!
+b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
+b.runner.Run(state)
+
+// If there was an error, return that
+if rawErr, ok := state.GetOk("error"); ok {
+return nil, rawErr.(error)
+}
+
+if amis, ok := state.GetOk("amis"); ok {
+// Build the artifact and return it
+artifact := &awscommon.Artifact{
+Amis: amis.(map[string]string),
+BuilderIdValue: BuilderId,
+Conn: ec2conn,
+}
+
+return artifact, nil
+}
+
+return nil, nil
+}
+
+func (b *Builder) Cancel() {
+if b.runner != nil {
+log.Println("Cancelling the step runner...")
+b.runner.Cancel()
+}
+}
@@ -0,0 +1,56 @@
+package ebssurrogate
+
+import (
+"testing"
+
+"github.com/mitchellh/packer/packer"
+)
+
+func testConfig() map[string]interface{} {
+return map[string]interface{}{
+"access_key": "foo",
+"secret_key": "bar",
+"source_ami": "foo",
+"instance_type": "foo",
+"region": "us-east-1",
+"ssh_username": "root",
+}
+}
+
+func TestBuilder_ImplementsBuilder(t *testing.T) {
+var raw interface{}
+raw = &Builder{}
+if _, ok := raw.(packer.Builder); !ok {
+t.Fatal("Builder should be a builder")
+}
+}
+
+func TestBuilder_Prepare_BadType(t *testing.T) {
+b := &Builder{}
+c := map[string]interface{}{
+"access_key": []string{},
+}
+
+warnings, err := b.Prepare(c)
+if len(warnings) > 0 {
+t.Fatalf("bad: %#v", warnings)
+}
+if err == nil {
+t.Fatal("prepare should fail")
+}
+}
+
+func TestBuilderPrepare_InvalidKey(t *testing.T) {
+var b Builder
+config := testConfig()
+
+// Add a random key
+config["i_should_not_be_valid"] = true
+warnings, err := b.Prepare(config)
+if len(warnings) > 0 {
+t.Fatalf("bad: %#v", warnings)
+}
+if err == nil {
+t.Fatal("should have error")
+}
+}
@@ -0,0 +1,65 @@
+package ebssurrogate
+
+import (
+"errors"
+"github.com/aws/aws-sdk-go/aws"
+"github.com/aws/aws-sdk-go/service/ec2"
+"github.com/mitchellh/packer/template/interpolate"
+)
+
+type RootBlockDevice struct {
+SourceDeviceName string `mapstructure:"source_device_name"`
+DeviceName string `mapstructure:"device_name"`
+DeleteOnTermination bool `mapstructure:"delete_on_termination"`
+IOPS int64 `mapstructure:"iops"`
+VolumeType string `mapstructure:"volume_type"`
+VolumeSize int64 `mapstructure:"volume_size"`
+}
+
+func (c *RootBlockDevice) Prepare(ctx *interpolate.Context) []error {
+var errs []error
+
+if c.SourceDeviceName == "" {
+errs = append(errs, errors.New("source_device_name for the root_device must be specified"))
+}
+
+if c.DeviceName == "" {
+errs = append(errs, errors.New("device_name for the root_device must be specified"))
+}
+
+if c.VolumeType == "gp2" && c.IOPS != 0 {
+errs = append(errs, errors.New("iops may not be specified for a gp2 volume"))
+}
+
+if c.IOPS < 0 {
+errs = append(errs, errors.New("iops must be greater than 0"))
+}
+
+if c.VolumeSize < 0 {
+errs = append(errs, errors.New("volume_size must be greater than 0"))
+}
+
+if len(errs) > 0 {
+return errs
+}
+
+return nil
+}
+
+func (d *RootBlockDevice) createBlockDeviceMapping(snapshotId string) *ec2.BlockDeviceMapping {
+rootBlockDevice := &ec2.EbsBlockDevice{
+SnapshotId: aws.String(snapshotId),
+VolumeType: aws.String(d.VolumeType),
+VolumeSize: aws.Int64(d.VolumeSize),
+DeleteOnTermination: aws.Bool(d.DeleteOnTermination),
+}
+
+if d.IOPS != 0 {
+rootBlockDevice.Iops = aws.Int64(d.IOPS)
+}
+
+return &ec2.BlockDeviceMapping{
+DeviceName: aws.String(d.DeviceName),
+Ebs: rootBlockDevice,
+}
+}
@@ -0,0 +1,80 @@
+package ebssurrogate
+
+import (
+"fmt"
+
+"github.com/aws/aws-sdk-go/aws"
+"github.com/aws/aws-sdk-go/service/ec2"
+"github.com/mitchellh/multistep"
+awscommon "github.com/mitchellh/packer/builder/amazon/common"
+"github.com/mitchellh/packer/packer"
+)
+
+// StepRegisterAMI creates the AMI.
+type StepRegisterAMI struct {
+RootDevice RootBlockDevice
+BlockDevices []*ec2.BlockDeviceMapping
+}
+
+func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
+config := state.Get("config").(*Config)
+ec2conn := state.Get("ec2").(*ec2.EC2)
+snapshotId := state.Get("snapshot_id").(string)
+ui := state.Get("ui").(packer.Ui)
+
+ui.Say("Registering the AMI...")
+
+blockDevices := s.BlockDevices
+blockDevices = append(blockDevices, s.RootDevice.createBlockDeviceMapping(snapshotId))
+
+registerOpts := &ec2.RegisterImageInput{
+Name: &config.AMIName,
+Architecture: aws.String(ec2.ArchitectureValuesX8664),
+RootDeviceName: aws.String(s.RootDevice.DeviceName),
+VirtualizationType: aws.String(config.AMIVirtType),
+BlockDeviceMappings: blockDevices,
+}
+
+if config.AMIEnhancedNetworking {
+// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
+// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
+registerOpts.SriovNetSupport = aws.String("simple")
+
+// Set EnaSupport to true
+// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
+registerOpts.EnaSupport = aws.Bool(true)
+}
+
+registerResp, err := ec2conn.RegisterImage(registerOpts)
+if err != nil {
+state.Put("error", fmt.Errorf("Error registering AMI: %s", err))
+ui.Error(state.Get("error").(error).Error())
+return multistep.ActionHalt
+}
+
+// Set the AMI ID in the state
+ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
+amis := make(map[string]string)
+amis[*ec2conn.Config.Region] = *registerResp.ImageId
+state.Put("amis", amis)
+
+// Wait for the image to become ready
+stateChange := awscommon.StateChangeConf{
+Pending: []string{"pending"},
+Target: "available",
+Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId),
+StepState: state,
+}
+
+ui.Say("Waiting for AMI to become ready...")
+if _, err := awscommon.WaitForState(&stateChange); err != nil {
+err := fmt.Errorf("Error waiting for AMI: %s", err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
+return multistep.ActionContinue
+}
+
+func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
@@ -0,0 +1,102 @@
+package ebssurrogate
+
+import (
+"errors"
+"fmt"
+"time"
+
+"github.com/aws/aws-sdk-go/service/ec2"
+"github.com/mitchellh/multistep"
+awscommon "github.com/mitchellh/packer/builder/amazon/common"
+"github.com/mitchellh/packer/packer"
+)
+
+// StepSnapshotNewRootVolume creates a snapshot of the created volume.
+//
+// Produces:
+// snapshot_id string - ID of the created snapshot
+type StepSnapshotNewRootVolume struct {
+NewRootMountPoint string
+snapshotId string
+}
+
+func (s *StepSnapshotNewRootVolume) Run(state multistep.StateBag) multistep.StepAction {
+ec2conn := state.Get("ec2").(*ec2.EC2)
+ui := state.Get("ui").(packer.Ui)
+instance := state.Get("instance").(*ec2.Instance)
+
+var newRootVolume string
+for _, volume := range instance.BlockDeviceMappings {
+if *volume.DeviceName == s.NewRootMountPoint {
+newRootVolume = *volume.Ebs.VolumeId
+}
+}
+
+ui.Say(fmt.Sprintf("Creating snapshot of EBS Volume %s...", newRootVolume))
+description := fmt.Sprintf("Packer: %s", time.Now().String())
+
+createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{
+VolumeId: &newRootVolume,
+Description: &description,
+})
+if err != nil {
+err := fmt.Errorf("Error creating snapshot: %s", err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
+// Set the snapshot ID so we can delete it later
+s.snapshotId = *createSnapResp.SnapshotId
+ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId))
+
+// Wait for the snapshot to be ready
+stateChange := awscommon.StateChangeConf{
+Pending: []string{"pending"},
+StepState: state,
+Target: "completed",
+Refresh: func() (interface{}, string, error) {
+resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIds: []*string{&s.snapshotId}})
+if err != nil {
+return nil, "", err
+}
+
+if len(resp.Snapshots) == 0 {
+return nil, "", errors.New("No snapshots found.")
+}
+
+s := resp.Snapshots[0]
+return s, *s.State, nil
+},
+}
+
+_, err = awscommon.WaitForState(&stateChange)
+if err != nil {
+err := fmt.Errorf("Error waiting for snapshot: %s", err)
+state.Put("error", err)
+ui.Error(err.Error())
+return multistep.ActionHalt
+}
+
+state.Put("snapshot_id", s.snapshotId)
+return multistep.ActionContinue
+}
+
+func (s *StepSnapshotNewRootVolume) Cleanup(state multistep.StateBag) {
+if s.snapshotId == "" {
+return
+}
+
+_, cancelled := state.GetOk(multistep.StateCancelled)
+_, halted := state.GetOk(multistep.StateHalted)
+
+if cancelled || halted {
+ec2conn := state.Get("ec2").(*ec2.EC2)
+ui := state.Get("ui").(packer.Ui)
+ui.Say("Removing snapshot since we cancelled or halted...")
+_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &s.snapshotId})
+if err != nil {
+ui.Error(fmt.Sprintf("Error: %s", err))
+}
+}
+}
@@ -2,6 +2,7 @@ package ebsvolume

 import (
 awscommon "github.com/mitchellh/packer/builder/amazon/common"
+"github.com/mitchellh/packer/template/interpolate"
 )

 type BlockDevice struct {

@@ -9,15 +10,20 @@ type BlockDevice struct {
 Tags map[string]string `mapstructure:"tags"`
 }

-func commonBlockDevices(mappings []BlockDevice) awscommon.BlockDevices {
+func commonBlockDevices(mappings []BlockDevice, ctx *interpolate.Context) (awscommon.BlockDevices, error) {
 result := make([]awscommon.BlockDevice, len(mappings))

 for i, mapping := range mappings {
-result[i] = mapping.BlockDevice
+interpolateBlockDev, err := interpolate.RenderInterface(&mapping.BlockDevice, ctx)
+if err != nil {
+return awscommon.BlockDevices{}, err
+}
+result[i] = *interpolateBlockDev.(*awscommon.BlockDevice)
 }

 return awscommon.BlockDevices{
 LaunchBlockDevices: awscommon.LaunchBlockDevices{
 LaunchMappings: result,
 },
-}
+}, nil
 }
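The `commonBlockDevices` change above leans on `interpolate.RenderInterface`, which renders the string fields of a value through the template context; the caller type-asserts the result back, which is how the `ebs_volumes` interpolation bug (GH-4464) is fixed. A hedged, self-contained sketch of that pattern (not part of this diff; the struct and field names below are illustrative stand-ins, not `awscommon.BlockDevice`):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

// Stand-in for a block-device mapping; field names are illustrative only.
type blockDevice struct {
	DeviceName string
	VolumeType string
}

type buildInfo struct {
	BuildRegion string
}

func main() {
	ctx := &interpolate.Context{}
	ctx.Data = &buildInfo{BuildRegion: "us-east-1"}

	in := &blockDevice{
		DeviceName: "/dev/xvdf",
		VolumeType: "gp2", // any string field may instead hold a template such as "{{ .BuildRegion }}"
	}

	// RenderInterface renders every string field through the context; the
	// caller type-asserts the result back, as commonBlockDevices does above.
	out, err := interpolate.RenderInterface(in, ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.(*blockDevice).DeviceName)
}
```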
@@ -28,6 +28,7 @@ type Config struct {
 VolumeMappings []BlockDevice `mapstructure:"ebs_volumes"`
 AMIEnhancedNetworking bool `mapstructure:"enhanced_networking"`

+launchBlockDevices awscommon.BlockDevices
 ctx interpolate.Context
 }

@@ -41,6 +42,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 err := config.Decode(&b.config, &config.DecodeOpts{
 Interpolate: true,
 InterpolateContext: &b.config.ctx,
+InterpolateFilter: &interpolate.RenderFilter{
+Exclude: []string{
+"run_tags",
+"ebs_volumes",
+},
+},
 }, raws...)
 if err != nil {
 return nil, err

@@ -51,6 +58,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
 errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)

+b.config.launchBlockDevices, err = commonBlockDevices(b.config.VolumeMappings, &b.config.ctx)
+if err != nil {
+errs = packer.MultiErrorAppend(errs, err)
+}
+
 if errs != nil && len(errs.Errors) > 0 {
 return nil, errs
 }

@@ -90,8 +102,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 state.Put("hook", hook)
 state.Put("ui", ui)

-launchBlockDevices := commonBlockDevices(b.config.VolumeMappings)
-
 // Build the steps
 steps := []multistep.Step{
 &awscommon.StepSourceAMIInfo{

@@ -126,12 +136,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
 EbsOptimized: b.config.EbsOptimized,
 AvailabilityZone: b.config.AvailabilityZone,
-BlockDevices: launchBlockDevices,
+BlockDevices: b.config.launchBlockDevices,
 Tags: b.config.RunTags,
+Ctx: b.config.ctx,
 InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
 },
 &stepTagEBSVolumes{
 VolumeMapping: b.config.VolumeMappings,
+Ctx: b.config.ctx,
 },
 &awscommon.StepGetPassword{
 Debug: b.config.PackerDebug,
@@ -3,19 +3,22 @@ package ebsvolume
 import (
 	"fmt"

-	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/mitchellh/multistep"
+	awscommon "github.com/mitchellh/packer/builder/amazon/common"
 	"github.com/mitchellh/packer/packer"
+	"github.com/mitchellh/packer/template/interpolate"
 )

 type stepTagEBSVolumes struct {
 	VolumeMapping []BlockDevice
+	Ctx           interpolate.Context
 }

 func (s *stepTagEBSVolumes) Run(state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	instance := state.Get("instance").(*ec2.Instance)
+	sourceAMI := state.Get("source_image").(*ec2.Image)
 	ui := state.Get("ui").(packer.Ui)

 	volumes := make(EbsVolumes)

@@ -40,12 +43,12 @@ func (s *stepTagEBSVolumes) Run(state multistep.StateBag) multistep.StepAction {
 			continue
 		}

-		tags := make([]*ec2.Tag, 0, len(mapping.Tags))
-		for key, value := range mapping.Tags {
-			tags = append(tags, &ec2.Tag{
-				Key:   aws.String(fmt.Sprintf("%s", key)),
-				Value: aws.String(fmt.Sprintf("%s", value)),
-			})
+		tags, err := awscommon.ConvertToEC2Tags(mapping.Tags, *ec2conn.Config.Region, *sourceAMI.ImageId, s.Ctx)
+		if err != nil {
+			err := fmt.Errorf("Error tagging device %s with %s", mapping.DeviceName, err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
 		}

 		for _, v := range instance.BlockDeviceMappings {
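This hunk replaces the inline tag-building loop with the shared ConvertToEC2Tags helper so volume tags can be interpolated against the build context. For reference, a minimal sketch of the plain map-to-[]*ec2.Tag conversion the old code performed, without any template interpolation (the helper name here is illustrative, not Packer's):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// toEC2Tags converts a simple string map into the []*ec2.Tag shape the
// EC2 API expects. Template interpolation is deliberately omitted.
func toEC2Tags(m map[string]string) []*ec2.Tag {
	tags := make([]*ec2.Tag, 0, len(m))
	for k, v := range m {
		tags = append(tags, &ec2.Tag{
			Key:   aws.String(k),
			Value: aws.String(v),
		})
	}
	return tags
}

func main() {
	fmt.Println(toEC2Tags(map[string]string{"Name": "packer-volume"}))
}
```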
@ -63,8 +63,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
||||||
InterpolateContext: &b.config.ctx,
|
InterpolateContext: &b.config.ctx,
|
||||||
InterpolateFilter: &interpolate.RenderFilter{
|
InterpolateFilter: &interpolate.RenderFilter{
|
||||||
Exclude: []string{
|
Exclude: []string{
|
||||||
|
"ami_description",
|
||||||
"bundle_upload_command",
|
"bundle_upload_command",
|
||||||
"bundle_vol_command",
|
"bundle_vol_command",
|
||||||
|
"run_tags",
|
||||||
|
"run_volume_tags",
|
||||||
|
"snapshot_tags",
|
||||||
|
"tags",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, configs...)
|
}, configs...)
|
||||||
|
@ -223,6 +228,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
||||||
AvailabilityZone: b.config.AvailabilityZone,
|
AvailabilityZone: b.config.AvailabilityZone,
|
||||||
BlockDevices: b.config.BlockDevices,
|
BlockDevices: b.config.BlockDevices,
|
||||||
Tags: b.config.RunTags,
|
Tags: b.config.RunTags,
|
||||||
|
Ctx: b.config.ctx,
|
||||||
},
|
},
|
||||||
&awscommon.StepGetPassword{
|
&awscommon.StepGetPassword{
|
||||||
Debug: b.config.PackerDebug,
|
Debug: b.config.PackerDebug,
|
||||||
|
@ -265,10 +271,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
||||||
ProductCodes: b.config.AMIProductCodes,
|
ProductCodes: b.config.AMIProductCodes,
|
||||||
SnapshotUsers: b.config.SnapshotUsers,
|
SnapshotUsers: b.config.SnapshotUsers,
|
||||||
SnapshotGroups: b.config.SnapshotGroups,
|
SnapshotGroups: b.config.SnapshotGroups,
|
||||||
|
Ctx: b.config.ctx,
|
||||||
},
|
},
|
||||||
&awscommon.StepCreateTags{
|
&awscommon.StepCreateTags{
|
||||||
Tags: b.config.AMITags,
|
Tags: b.config.AMITags,
|
||||||
SnapshotTags: b.config.SnapshotTags,
|
SnapshotTags: b.config.SnapshotTags,
|
||||||
|
Ctx: b.config.ctx,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -29,9 +29,14 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
|
registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
|
|
||||||
if config.AMIEnhancedNetworking {
|
if config.AMIEnhancedNetworking {
|
||||||
|
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
|
||||||
|
// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
|
||||||
registerOpts.SriovNetSupport = aws.String("simple")
|
registerOpts.SriovNetSupport = aws.String("simple")
|
||||||
|
|
||||||
|
// Set EnaSupport to true.
|
||||||
|
// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
|
||||||
|
registerOpts.EnaSupport = aws.Bool(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
registerResp, err := ec2conn.RegisterImage(registerOpts)
|
registerResp, err := ec2conn.RegisterImage(registerOpts)
|
||||||
|
|
|
@ -72,6 +72,8 @@ type Config struct {
|
||||||
AzureTags map[string]*string `mapstructure:"azure_tags"`
|
AzureTags map[string]*string `mapstructure:"azure_tags"`
|
||||||
ResourceGroupName string `mapstructure:"resource_group_name"`
|
ResourceGroupName string `mapstructure:"resource_group_name"`
|
||||||
StorageAccount string `mapstructure:"storage_account"`
|
StorageAccount string `mapstructure:"storage_account"`
|
||||||
|
TempComputeName string `mapstructure:"temp_compute_name"`
|
||||||
|
TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
|
||||||
storageAccountBlobEndpoint string
|
storageAccountBlobEndpoint string
|
||||||
CloudEnvironmentName string `mapstructure:"cloud_environment_name"`
|
CloudEnvironmentName string `mapstructure:"cloud_environment_name"`
|
||||||
cloudEnvironment *azure.Environment
|
cloudEnvironment *azure.Environment
|
||||||
|
@ -288,9 +290,17 @@ func setRuntimeValues(c *Config) {
|
||||||
|
|
||||||
c.tmpAdminPassword = tempName.AdminPassword
|
c.tmpAdminPassword = tempName.AdminPassword
|
||||||
c.tmpCertificatePassword = tempName.CertificatePassword
|
c.tmpCertificatePassword = tempName.CertificatePassword
|
||||||
|
if c.TempComputeName == "" {
|
||||||
c.tmpComputeName = tempName.ComputeName
|
c.tmpComputeName = tempName.ComputeName
|
||||||
|
} else {
|
||||||
|
c.tmpComputeName = c.TempComputeName
|
||||||
|
}
|
||||||
c.tmpDeploymentName = tempName.DeploymentName
|
c.tmpDeploymentName = tempName.DeploymentName
|
||||||
|
if c.TempResourceGroupName == "" {
|
||||||
c.tmpResourceGroupName = tempName.ResourceGroupName
|
c.tmpResourceGroupName = tempName.ResourceGroupName
|
||||||
|
} else {
|
||||||
|
c.tmpResourceGroupName = c.TempResourceGroupName
|
||||||
|
}
|
||||||
c.tmpOSDiskName = tempName.OSDiskName
|
c.tmpOSDiskName = tempName.OSDiskName
|
||||||
c.tmpKeyVaultName = tempName.KeyVaultName
|
c.tmpKeyVaultName = tempName.KeyVaultName
|
||||||
}
|
}
|
||||||
|
|
|
@@ -70,11 +70,15 @@ func (c *Communicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error

 	// Copy the contents to the temporary file
 	_, err = io.Copy(tempfile, src)
-	tempfile.Close()
 	if err != nil {
 		return err
 	}

+	if fi != nil {
+		tempfile.Chmod((*fi).Mode())
+	}
+	tempfile.Close()

 	// Copy the file into place by copying the temporary file we put
 	// into the shared folder into the proper location in the container
 	cmd := &packer.RemoteCmd{
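This hunk delays closing the temporary file until after an optional Chmod, so the uploaded file keeps the mode of the source when one is supplied. A small standalone sketch of the same ordering (the helper and file names are made up):

```go
package main

import (
	"io"
	"os"
)

// copyWithMode writes src into dst and applies the original mode while
// the handle is still open, mirroring the order used in the fix above.
func copyWithMode(dst string, src io.Reader, fi *os.FileInfo) error {
	tmp, err := os.Create(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(tmp, src); err != nil {
		tmp.Close()
		return err
	}
	if fi != nil {
		// Chmod before Close so the permissions stick to the file.
		tmp.Chmod((*fi).Mode())
	}
	return tmp.Close()
}
```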
@ -97,9 +97,10 @@ func (d *DockerDriver) Export(id string, dst io.Writer) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *DockerDriver) Import(path string, repo string) (string, error) {
|
func (d *DockerDriver) Import(path string, repo string) (string, error) {
|
||||||
var stdout bytes.Buffer
|
var stdout, stderr bytes.Buffer
|
||||||
cmd := exec.Command("docker", "import", "-", repo)
|
cmd := exec.Command("docker", "import", "-", repo)
|
||||||
cmd.Stdout = &stdout
|
cmd.Stdout = &stdout
|
||||||
|
cmd.Stderr = &stderr
|
||||||
stdin, err := cmd.StdinPipe()
|
stdin, err := cmd.StdinPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -122,8 +123,7 @@ func (d *DockerDriver) Import(path string, repo string) (string, error) {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
if err := cmd.Wait(); err != nil {
|
||||||
err = fmt.Errorf("Error importing container: %s", err)
|
return "", fmt.Errorf("Error importing container: %s\n\nStderr: %s", err, stderr.String())
|
||||||
return "", err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return strings.TrimSpace(stdout.String()), nil
|
return strings.TrimSpace(stdout.String()), nil
|
||||||
|
@@ -275,8 +275,34 @@ func (d *DockerDriver) StopContainer(id string) error {

 func (d *DockerDriver) TagImage(id string, repo string, force bool) error {
 	args := []string{"tag"}

+	// detect running docker version before tagging
+	// flag `force` for docker tagging was removed after Docker 1.12.0
+	// to keep its backward compatibility, we are not going to remove `force`
+	// option, but to ignore it when Docker version >= 1.12.0
+	//
+	// for more detail, please refer to the following links:
+	// - https://docs.docker.com/engine/deprecated/#/f-flag-on-docker-tag
+	// - https://github.com/docker/docker/pull/23090
+	version_running, err := d.Version()
+	if err != nil {
+		return err
+	}
+
+	version_deprecated, err := version.NewVersion(string("1.12.0"))
+	if err != nil {
+		// should never reach this line
+		return err
+	}
+
 	if force {
+		if version_running.LessThan(version_deprecated) {
 			args = append(args, "-f")
+		} else {
+			// do nothing if Docker version >= 1.12.0
+			log.Printf("[WARN] option: \"force\" will be ignored here")
+			log.Printf("since it was removed after Docker 1.12.0 released")
+		}
 	}
 	args = append(args, id, repo)

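The tag fix compares the daemon's reported version against 1.12.0 with HashiCorp's go-version package before deciding whether `-f` is still a legal flag. A minimal, standalone sketch of that comparison (the running version string is invented):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	running, err := version.NewVersion("1.11.2") // e.g. what `docker version` reported
	if err != nil {
		panic(err)
	}
	deprecated := version.Must(version.NewVersion("1.12.0"))

	if running.LessThan(deprecated) {
		fmt.Println("docker tag still accepts -f")
	} else {
		fmt.Println("-f was removed; ignore the force option")
	}
}
```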
@ -39,6 +39,7 @@ type Config struct {
|
||||||
Network string `mapstructure:"network"`
|
Network string `mapstructure:"network"`
|
||||||
NetworkProjectId string `mapstructure:"network_project_id"`
|
NetworkProjectId string `mapstructure:"network_project_id"`
|
||||||
OmitExternalIP bool `mapstructure:"omit_external_ip"`
|
OmitExternalIP bool `mapstructure:"omit_external_ip"`
|
||||||
|
OnHostMaintenance string `mapstructure:"on_host_maintenance"`
|
||||||
Preemptible bool `mapstructure:"preemptible"`
|
Preemptible bool `mapstructure:"preemptible"`
|
||||||
RawStateTimeout string `mapstructure:"state_timeout"`
|
RawStateTimeout string `mapstructure:"state_timeout"`
|
||||||
Region string `mapstructure:"region"`
|
Region string `mapstructure:"region"`
|
||||||
|
@@ -92,6 +93,22 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 	if c.ImageDescription == "" {
 		c.ImageDescription = "Created by Packer"
 	}
+	// Setting OnHostMaintenance Correct Defaults
+	//   "MIGRATE" : Possible if Preemptible is false
+	//   "TERMINATE": Possible if Preemptible is true
+	if c.OnHostMaintenance == "" && c.Preemptible {
+		c.OnHostMaintenance = "MIGRATE"
+	}
+
+	if c.OnHostMaintenance == "" && !c.Preemptible {
+		c.OnHostMaintenance = "TERMINATE"
+	}
+
+	// Make sure user sets a valid value for on_host_maintenance option
+	if !(c.OnHostMaintenance == "MIGRATE" || c.OnHostMaintenance == "TERMINATE") {
+		errs = packer.MultiErrorAppend(errs,
+			errors.New("on_host_maintenance must be one of MIGRATE or TERMINATE."))
+	}
+
 	if c.ImageName == "" {
 		img, err := interpolate.Render("packer-{{timestamp}}", nil)
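The added block first defaults on_host_maintenance from the preemptible setting and then rejects anything the GCE API will not accept. A compact, standalone sketch of just the validation half, using only the standard library (the function name is invented):

```go
package main

import (
	"errors"
	"fmt"
)

// checkOnHostMaintenance rejects anything other than the two values the
// GCE scheduling API accepts for this option.
func checkOnHostMaintenance(v string) error {
	if v != "MIGRATE" && v != "TERMINATE" {
		return errors.New("on_host_maintenance must be one of MIGRATE or TERMINATE")
	}
	return nil
}

func main() {
	fmt.Println(checkOnHostMaintenance("MIGRATE"))     // <nil>
	fmt.Println(checkOnHostMaintenance("SO VERY BAD")) // error
}
```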
@ -104,6 +104,21 @@ func TestConfigPrepare(t *testing.T) {
|
||||||
"SO VERY BAD",
|
"SO VERY BAD",
|
||||||
true,
|
true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"on_host_maintenance",
|
||||||
|
nil,
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"on_host_maintenance",
|
||||||
|
"TERMINATE",
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"on_host_maintenance",
|
||||||
|
"SO VERY BAD",
|
||||||
|
true,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"preemptible",
|
"preemptible",
|
||||||
nil,
|
nil,
|
||||||
|
|
|
@ -69,6 +69,7 @@ type InstanceConfig struct {
|
||||||
Network string
|
Network string
|
||||||
NetworkProjectId string
|
NetworkProjectId string
|
||||||
OmitExternalIP bool
|
OmitExternalIP bool
|
||||||
|
OnHostMaintenance string
|
||||||
Preemptible bool
|
Preemptible bool
|
||||||
Region string
|
Region string
|
||||||
Scopes []string
|
Scopes []string
|
||||||
|
|
|
@ -386,6 +386,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Scheduling: &compute.Scheduling{
|
Scheduling: &compute.Scheduling{
|
||||||
|
OnHostMaintenance: c.OnHostMaintenance,
|
||||||
Preemptible: c.Preemptible,
|
Preemptible: c.Preemptible,
|
||||||
},
|
},
|
||||||
ServiceAccounts: []*compute.ServiceAccount{
|
ServiceAccounts: []*compute.ServiceAccount{
|
||||||
|
|
|
@ -67,7 +67,7 @@ func getImage(c *Config, d Driver) (*Image, error) {
|
||||||
if c.SourceImageProjectId == "" {
|
if c.SourceImageProjectId == "" {
|
||||||
return d.GetImage(name, fromFamily)
|
return d.GetImage(name, fromFamily)
|
||||||
} else {
|
} else {
|
||||||
return d.GetImageFromProject(c.SourceImageProjectId, c.SourceImage, fromFamily)
|
return d.GetImageFromProject(c.SourceImageProjectId, name, fromFamily)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,6 +110,7 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction
|
||||||
Network: c.Network,
|
Network: c.Network,
|
||||||
NetworkProjectId: c.NetworkProjectId,
|
NetworkProjectId: c.NetworkProjectId,
|
||||||
OmitExternalIP: c.OmitExternalIP,
|
OmitExternalIP: c.OmitExternalIP,
|
||||||
|
OnHostMaintenance: c.OnHostMaintenance,
|
||||||
Preemptible: c.Preemptible,
|
Preemptible: c.Preemptible,
|
||||||
Region: c.Region,
|
Region: c.Region,
|
||||||
ServiceAccountEmail: c.Account.ClientEmail,
|
ServiceAccountEmail: c.Account.ClientEmail,
|
||||||
|
|
|
@@ -23,11 +23,14 @@ type artifact struct {
 func NewArtifact(dir string) (packer.Artifact, error) {
 	files := make([]string, 0, 5)
 	visit := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
 		if !info.IsDir() {
 			files = append(files, path)
 		}

-		return err
+		return nil
 	}

 	if err := filepath.Walk(dir, visit); err != nil {
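The same fix appears in several of the artifact and floppy hunks below: the filepath.Walk callback now checks the incoming error before touching info, and returns nil on success instead of echoing err. A self-contained sketch of the corrected pattern:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	var files []string
	visit := func(path string, info os.FileInfo, err error) error {
		// If Walk failed to stat this path, info may be nil; bail out
		// before dereferencing it.
		if err != nil {
			return err
		}
		if !info.IsDir() {
			files = append(files, path)
		}
		// Returning nil keeps the walk going.
		return nil
	}

	if err := filepath.Walk(".", visit); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(len(files), "files found")
}
```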
@ -102,6 +102,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
||||||
&StepAllocateIp{
|
&StepAllocateIp{
|
||||||
FloatingIpPool: b.config.FloatingIpPool,
|
FloatingIpPool: b.config.FloatingIpPool,
|
||||||
FloatingIp: b.config.FloatingIp,
|
FloatingIp: b.config.FloatingIp,
|
||||||
|
ReuseIps: b.config.ReuseIps,
|
||||||
},
|
},
|
||||||
&communicator.StepConnect{
|
&communicator.StepConnect{
|
||||||
Config: &b.config.RunConfig.Comm,
|
Config: &b.config.RunConfig.Comm,
|
||||||
|
|
|
@ -23,6 +23,7 @@ type RunConfig struct {
|
||||||
RackconnectWait bool `mapstructure:"rackconnect_wait"`
|
RackconnectWait bool `mapstructure:"rackconnect_wait"`
|
||||||
FloatingIpPool string `mapstructure:"floating_ip_pool"`
|
FloatingIpPool string `mapstructure:"floating_ip_pool"`
|
||||||
FloatingIp string `mapstructure:"floating_ip"`
|
FloatingIp string `mapstructure:"floating_ip"`
|
||||||
|
ReuseIps bool `mapstructure:"reuse_ips"`
|
||||||
SecurityGroups []string `mapstructure:"security_groups"`
|
SecurityGroups []string `mapstructure:"security_groups"`
|
||||||
Networks []string `mapstructure:"networks"`
|
Networks []string `mapstructure:"networks"`
|
||||||
UserData string `mapstructure:"user_data"`
|
UserData string `mapstructure:"user_data"`
|
||||||
|
|
|
@ -13,6 +13,7 @@ import (
|
||||||
type StepAllocateIp struct {
|
type StepAllocateIp struct {
|
||||||
FloatingIpPool string
|
FloatingIpPool string
|
||||||
FloatingIp string
|
FloatingIp string
|
||||||
|
ReuseIps bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction {
|
func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
|
@ -37,8 +38,9 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
if s.FloatingIp != "" {
|
if s.FloatingIp != "" {
|
||||||
instanceIp.IP = s.FloatingIp
|
instanceIp.IP = s.FloatingIp
|
||||||
} else if s.FloatingIpPool != "" {
|
} else if s.FloatingIpPool != "" {
|
||||||
// If we have a free floating IP in the pool, use it first
|
// If ReuseIps is set to true and we have a free floating IP in
|
||||||
// rather than creating one
|
// the pool, use it first rather than creating one
|
||||||
|
if s.ReuseIps {
|
||||||
ui.Say(fmt.Sprintf("Searching for unassociated floating IP in pool %s", s.FloatingIpPool))
|
ui.Say(fmt.Sprintf("Searching for unassociated floating IP in pool %s", s.FloatingIpPool))
|
||||||
pager := floatingips.List(client)
|
pager := floatingips.List(client)
|
||||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||||
|
@ -68,6 +70,7 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
ui.Error(err.Error())
|
ui.Error(err.Error())
|
||||||
return multistep.ActionHalt
|
return multistep.ActionHalt
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if instanceIp.IP == "" {
|
if instanceIp.IP == "" {
|
||||||
ui.Say(fmt.Sprintf("Creating floating IP..."))
|
ui.Say(fmt.Sprintf("Creating floating IP..."))
|
||||||
|
|
|
@ -28,6 +28,9 @@ type artifact struct {
|
||||||
func NewArtifact(dir string) (packer.Artifact, error) {
|
func NewArtifact(dir string) (packer.Artifact, error) {
|
||||||
files := make([]string, 0, 5)
|
files := make([]string, 0, 5)
|
||||||
visit := func(path string, info os.FileInfo, err error) error {
|
visit := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
for _, unnecessaryFile := range unnecessaryFiles {
|
for _, unnecessaryFile := range unnecessaryFiles {
|
||||||
if unnecessary, _ := regexp.MatchString(unnecessaryFile, path); unnecessary {
|
if unnecessary, _ := regexp.MatchString(unnecessaryFile, path); unnecessary {
|
||||||
return os.RemoveAll(path)
|
return os.RemoveAll(path)
|
||||||
|
@ -38,7 +41,7 @@ func NewArtifact(dir string) (packer.Artifact, error) {
|
||||||
files = append(files, path)
|
files = append(files, path)
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := filepath.Walk(dir, visit); err != nil {
|
if err := filepath.Walk(dir, visit); err != nil {
|
||||||
|
|
|
@ -444,11 +444,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
||||||
// Compile the artifact list
|
// Compile the artifact list
|
||||||
files := make([]string, 0, 5)
|
files := make([]string, 0, 5)
|
||||||
visit := func(path string, info os.FileInfo, err error) error {
|
visit := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !info.IsDir() {
|
if !info.IsDir() {
|
||||||
files = append(files, path)
|
files = append(files, path)
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := filepath.Walk(b.config.OutputDir, visit); err != nil {
|
if err := filepath.Walk(b.config.OutputDir, visit); err != nil {
|
||||||
|
|
|
@@ -38,7 +38,6 @@ func (s *stepConvertDisk) Run(state multistep.StateBag) multistep.StepAction {
 	}

 	command = append(command, []string{
-		"-f", config.Format,
 		"-O", config.Format,
 		sourcePath,
 		targetPath,

@@ -22,7 +22,6 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction {

 	command := []string{
 		"convert",
-		"-f", config.Format,
 		"-O", config.Format,
 		isoPath,
 		path,
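Both qemu hunks drop the explicit `-f` input-format flag so qemu-img probes the source format itself instead of having the configured output format forced onto it. A rough sketch of how the resulting command line could be assembled and run, with made-up paths:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	outputFormat := "qcow2"
	source := "input.img"    // input format is detected by qemu-img
	target := "output.qcow2"

	// Only the output format is pinned with -O; no -f for the input.
	args := []string{"convert", "-O", outputFormat, source, target}
	cmd := exec.Command("qemu-img", args...)

	if out, err := cmd.CombinedOutput(); err != nil {
		fmt.Printf("qemu-img failed: %v\n%s", err, out)
		return
	}
	fmt.Println("converted", source, "->", target)
}
```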
@ -116,7 +116,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error
|
||||||
ui.Message(fmt.Sprintf(
|
ui.Message(fmt.Sprintf(
|
||||||
"The VM will be run headless, without a GUI. If you want to\n"+
|
"The VM will be run headless, without a GUI. If you want to\n"+
|
||||||
"view the screen of the VM, connect via VNC without a password to\n"+
|
"view the screen of the VM, connect via VNC without a password to\n"+
|
||||||
"%s:%d", vncIp, vncPort))
|
"vnc://%s:%d", vncIp, vncPort))
|
||||||
} else {
|
} else {
|
||||||
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
||||||
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
||||||
|
|
|
@ -23,6 +23,9 @@ type artifact struct {
|
||||||
func NewArtifact(dir string) (packer.Artifact, error) {
|
func NewArtifact(dir string) (packer.Artifact, error) {
|
||||||
files := make([]string, 0, 5)
|
files := make([]string, 0, 5)
|
||||||
visit := func(path string, info os.FileInfo, err error) error {
|
visit := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !info.IsDir() {
|
if !info.IsDir() {
|
||||||
files = append(files, path)
|
files = append(files, path)
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,6 +56,7 @@ func (s *StepAttachGuestAdditions) Run(state multistep.StateBag) multistep.StepA
|
||||||
|
|
||||||
// Track the path so that we can unregister it from VirtualBox later
|
// Track the path so that we can unregister it from VirtualBox later
|
||||||
s.attachedPath = guestAdditionsPath
|
s.attachedPath = guestAdditionsPath
|
||||||
|
state.Put("guest_additions_attached", true)
|
||||||
|
|
||||||
return multistep.ActionContinue
|
return multistep.ActionContinue
|
||||||
}
|
}
|
||||||
|
@ -66,7 +67,6 @@ func (s *StepAttachGuestAdditions) Cleanup(state multistep.StateBag) {
|
||||||
}
|
}
|
||||||
|
|
||||||
driver := state.Get("driver").(Driver)
|
driver := state.Get("driver").(Driver)
|
||||||
ui := state.Get("ui").(packer.Ui)
|
|
||||||
vmName := state.Get("vmName").(string)
|
vmName := state.Get("vmName").(string)
|
||||||
|
|
||||||
command := []string{
|
command := []string{
|
||||||
|
@ -77,7 +77,7 @@ func (s *StepAttachGuestAdditions) Cleanup(state multistep.StateBag) {
|
||||||
"--medium", "none",
|
"--medium", "none",
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := driver.VBoxManage(command...); err != nil {
|
// Remove the ISO. Note that this will probably fail since
|
||||||
ui.Error(fmt.Sprintf("Error unregistering guest additions: %s", err))
|
// stepRemoveDevices does this as well. No big deal.
|
||||||
}
|
driver.VBoxManage(command...)
|
||||||
}
|
}
|
||||||
|
|
|
@ -79,6 +79,23 @@ func (s *StepRemoveDevices) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _, ok := state.GetOk("guest_additions_attached"); ok {
|
||||||
|
ui.Message("Removing guest additions drive...")
|
||||||
|
command := []string{
|
||||||
|
"storageattach", vmName,
|
||||||
|
"--storagectl", "IDE Controller",
|
||||||
|
"--port", "1",
|
||||||
|
"--device", "0",
|
||||||
|
"--medium", "none",
|
||||||
|
}
|
||||||
|
if err := driver.VBoxManage(command...); err != nil {
|
||||||
|
err := fmt.Errorf("Error removing guest additions: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return multistep.ActionContinue
|
return multistep.ActionContinue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -40,7 +40,7 @@ func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
ui.Message(fmt.Sprintf(
|
ui.Message(fmt.Sprintf(
|
||||||
"The VM will be run headless, without a GUI. If you want to\n"+
|
"The VM will be run headless, without a GUI. If you want to\n"+
|
||||||
"view the screen of the VM, connect via VRDP without a password to\n"+
|
"view the screen of the VM, connect via VRDP without a password to\n"+
|
||||||
"%s:%d", vrdpIp, vrdpPort))
|
"rdp://%s:%d", vrdpIp, vrdpPort))
|
||||||
} else {
|
} else {
|
||||||
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
||||||
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
||||||
|
|
|
@ -76,7 +76,7 @@ func (s *stepCreateVM) Cleanup(state multistep.StateBag) {
|
||||||
ui.Say("Unregistering and deleting virtual machine...")
|
ui.Say("Unregistering and deleting virtual machine...")
|
||||||
var err error = nil
|
var err error = nil
|
||||||
for i := 0; i < 5; i++ {
|
for i := 0; i < 5; i++ {
|
||||||
err = driver.VBoxManage("unregistervm", s.vmName, "--delete")
|
err = driver.Delete(s.vmName)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,11 +23,13 @@ type localArtifact struct {
|
||||||
func NewLocalArtifact(dir string) (packer.Artifact, error) {
|
func NewLocalArtifact(dir string) (packer.Artifact, error) {
|
||||||
files := make([]string, 0, 5)
|
files := make([]string, 0, 5)
|
||||||
visit := func(path string, info os.FileInfo, err error) error {
|
visit := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !info.IsDir() {
|
if !info.IsDir() {
|
||||||
files = append(files, path)
|
files = append(files, path)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := filepath.Walk(dir, visit); err != nil {
|
if err := filepath.Walk(dir, visit); err != nil {
|
||||||
|
|
|
@@ -63,7 +63,7 @@ func VNCPassword(skipPassword bool) string {
 	}
 	length := int(8)

-	charSet := []byte("1234567890-=qwertyuiop[]asdfghjkl;zxcvbnm,./!@#%^*()_+QWERTYUIOP{}|ASDFGHJKL:XCVBNM<>?")
+	charSet := []byte("012345689abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
 	charSetLength := len(charSet)

 	password := make([]byte, length)
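The new character set drops shell- and keyboard-hostile punctuation while keeping the 8-character length that classic VNC authentication honours. A small sketch of the same generation scheme with math/rand (seeding and charset choice are illustrative, not the exact builder code):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func vncPassword() string {
	const length = 8 // classic VNC auth only uses the first 8 characters
	charSet := []byte("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

	rand.Seed(time.Now().UnixNano())
	password := make([]byte, length)
	for i := range password {
		password[i] = charSet[rand.Intn(len(charSet))]
	}
	return string(password)
}

func main() {
	fmt.Println(vncPassword())
}
```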
@ -1,46 +0,0 @@
|
||||||
diff a/builder/vmware/common/step_configure_vnc.go b/builder/vmware/common/step_configure_vnc.go (rejected hunks)
|
|
||||||
@@ -52,6 +52,21 @@ func (StepConfigureVNC) VNCAddress(portMin, portMax uint) (string, uint, error)
|
|
||||||
return "127.0.0.1", vncPort, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
+func VNCPassword() (string) {
|
|
||||||
+ length := int(8)
|
|
||||||
+
|
|
||||||
+ charSet := []byte("1234567890-=qwertyuiop[]asdfghjkl;zxcvbnm,./!@#%^*()_+QWERTYUIOP{}|ASDFGHJKL:XCVBNM<>?")
|
|
||||||
+ charSetLength := len(charSet)
|
|
||||||
+
|
|
||||||
+ password := make([]byte, length)
|
|
||||||
+
|
|
||||||
+ for i := 0; i < length; i++ {
|
|
||||||
+ password[i] = charSet[ rand.Intn(charSetLength) ]
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ return string(password)
|
|
||||||
+}
|
|
||||||
+
|
|
||||||
func (s *StepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction {
|
|
||||||
driver := state.Get("driver").(Driver)
|
|
||||||
ui := state.Get("ui").(packer.Ui)
|
|
||||||
@@ -86,12 +101,14 @@ func (s *StepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction {
|
|
||||||
ui.Error(err.Error())
|
|
||||||
return multistep.ActionHalt
|
|
||||||
}
|
|
||||||
+ vncPassword := VNCPassword()
|
|
||||||
|
|
||||||
log.Printf("Found available VNC port: %d", vncPort)
|
|
||||||
|
|
||||||
vmxData := ParseVMX(string(vmxBytes))
|
|
||||||
vmxData["remotedisplay.vnc.enabled"] = "TRUE"
|
|
||||||
vmxData["remotedisplay.vnc.port"] = fmt.Sprintf("%d", vncPort)
|
|
||||||
+ vmxData["remotedisplay.vnc.password"] = vncPassword
|
|
||||||
|
|
||||||
if err := WriteVMX(vmxPath, vmxData); err != nil {
|
|
||||||
err := fmt.Errorf("Error writing VMX data: %s", err)
|
|
||||||
@@ -102,6 +119,7 @@ func (s *StepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction {
|
|
||||||
|
|
||||||
state.Put("vnc_port", vncPort)
|
|
||||||
state.Put("vnc_ip", vncIp)
|
|
||||||
+ state.Put("vnc_password", vncPassword)
|
|
||||||
|
|
||||||
return multistep.ActionContinue
|
|
||||||
}
|
|
|
@ -48,7 +48,7 @@ func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
ui.Message(fmt.Sprintf(
|
ui.Message(fmt.Sprintf(
|
||||||
"The VM will be run headless, without a GUI. If you want to\n"+
|
"The VM will be run headless, without a GUI. If you want to\n"+
|
||||||
"view the screen of the VM, connect via VNC with the password \"%s\" to\n"+
|
"view the screen of the VM, connect via VNC with the password \"%s\" to\n"+
|
||||||
"%s:%d", vncPassword, vncIp, vncPort))
|
"vnc://%s:%d", vncPassword, vncIp, vncPort))
|
||||||
} else {
|
} else {
|
||||||
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
ui.Message("The VM will be run headless, without a GUI, as configured.\n" +
|
||||||
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
"If the run isn't succeeding as you expect, please enable the GUI\n" +
|
||||||
|
|
|
@ -1,26 +0,0 @@
|
||||||
diff a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go (rejected hunks)
|
|
||||||
@@ -45,6 +45,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction
|
|
||||||
ui := state.Get("ui").(packer.Ui)
|
|
||||||
vncIp := state.Get("vnc_ip").(string)
|
|
||||||
vncPort := state.Get("vnc_port").(uint)
|
|
||||||
+ vncPassword := state.Get("vnc_password")
|
|
||||||
|
|
||||||
// Connect to VNC
|
|
||||||
ui.Say("Connecting to VM via VNC")
|
|
||||||
@@ -57,7 +58,15 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction
|
|
||||||
}
|
|
||||||
defer nc.Close()
|
|
||||||
|
|
||||||
- c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: true})
|
|
||||||
+ var auth []vnc.ClientAuth
|
|
||||||
+
|
|
||||||
+ if vncPassword != nil {
|
|
||||||
+ auth = []vnc.ClientAuth{&vnc.PasswordAuth{Password: vncPassword.(string)}}
|
|
||||||
+ } else {
|
|
||||||
+ auth = []vnc.ClientAuth{}
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ c, err := vnc.Client(nc, &vnc.ClientConfig{Auth: auth, Exclusive: true})
|
|
||||||
if err != nil {
|
|
||||||
err := fmt.Errorf("Error handshaking with VNC: %s", err)
|
|
||||||
state.Put("error", err)
|
|
|
@ -38,19 +38,20 @@ type Config struct {
|
||||||
vmwcommon.VMXConfig `mapstructure:",squash"`
|
vmwcommon.VMXConfig `mapstructure:",squash"`
|
||||||
|
|
||||||
AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
|
AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
|
||||||
|
BootCommand []string `mapstructure:"boot_command"`
|
||||||
DiskName string `mapstructure:"vmdk_name"`
|
DiskName string `mapstructure:"vmdk_name"`
|
||||||
DiskSize uint `mapstructure:"disk_size"`
|
DiskSize uint `mapstructure:"disk_size"`
|
||||||
DiskTypeId string `mapstructure:"disk_type_id"`
|
DiskTypeId string `mapstructure:"disk_type_id"`
|
||||||
Format string `mapstructure:"format"`
|
Format string `mapstructure:"format"`
|
||||||
GuestOSType string `mapstructure:"guest_os_type"`
|
GuestOSType string `mapstructure:"guest_os_type"`
|
||||||
Version string `mapstructure:"version"`
|
|
||||||
VMName string `mapstructure:"vm_name"`
|
|
||||||
BootCommand []string `mapstructure:"boot_command"`
|
|
||||||
KeepRegistered bool `mapstructure:"keep_registered"`
|
KeepRegistered bool `mapstructure:"keep_registered"`
|
||||||
|
OVFToolOptions []string `mapstructure:"ovftool_options"`
|
||||||
SkipCompaction bool `mapstructure:"skip_compaction"`
|
SkipCompaction bool `mapstructure:"skip_compaction"`
|
||||||
SkipExport bool `mapstructure:"skip_export"`
|
SkipExport bool `mapstructure:"skip_export"`
|
||||||
VMXTemplatePath string `mapstructure:"vmx_template_path"`
|
VMName string `mapstructure:"vm_name"`
|
||||||
VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"`
|
VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"`
|
||||||
|
VMXTemplatePath string `mapstructure:"vmx_template_path"`
|
||||||
|
Version string `mapstructure:"version"`
|
||||||
|
|
||||||
RemoteType string `mapstructure:"remote_type"`
|
RemoteType string `mapstructure:"remote_type"`
|
||||||
RemoteDatastore string `mapstructure:"remote_datastore"`
|
RemoteDatastore string `mapstructure:"remote_datastore"`
|
||||||
|
|
|
@ -205,7 +205,7 @@ func (d *ESX5Driver) VNCAddress(_ string, portMin, portMax uint) (string, uint,
|
||||||
}
|
}
|
||||||
address := fmt.Sprintf("%s:%d", d.Host, port)
|
address := fmt.Sprintf("%s:%d", d.Host, port)
|
||||||
log.Printf("Trying address: %s...", address)
|
log.Printf("Trying address: %s...", address)
|
||||||
l, err := net.DialTimeout("tcp", address, 1*time.Second)
|
l, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if e, ok := err.(*net.OpError); ok {
|
if e, ok := err.(*net.OpError); ok {
|
||||||
|
@ -248,12 +248,7 @@ func (d *ESX5Driver) CommHost(state multistep.StateBag) (string, error) {
|
||||||
port = sshc.WinRMPort
|
port = sshc.WinRMPort
|
||||||
}
|
}
|
||||||
|
|
||||||
if address, ok := state.GetOk("vm_address"); ok {
|
|
||||||
return address.(string), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if address := config.CommConfig.Host(); address != "" {
|
if address := config.CommConfig.Host(); address != "" {
|
||||||
state.Put("vm_address", address)
|
|
||||||
return address, nil
|
return address, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -302,7 +297,6 @@ func (d *ESX5Driver) CommHost(state multistep.StateBag) (string, error) {
|
||||||
} else {
|
} else {
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
address := record["IPAddress"]
|
address := record["IPAddress"]
|
||||||
state.Put("vm_address", address)
|
|
||||||
return address, nil
|
return address, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -88,11 +88,4 @@ func TestESX5Driver_CommHost(t *testing.T) {
|
||||||
if host != expected_host {
|
if host != expected_host {
|
||||||
t.Errorf("bad host name: %s", host)
|
t.Errorf("bad host name: %s", host)
|
||||||
}
|
}
|
||||||
address, ok := state.GetOk("vm_address")
|
|
||||||
if !ok {
|
|
||||||
t.Error("state not updated with vm_address")
|
|
||||||
}
|
|
||||||
if address.(string) != expected_host {
|
|
||||||
t.Errorf("bad vm_address: %s", address.(string))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,13 +3,14 @@ package iso
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/mitchellh/multistep"
|
|
||||||
"github.com/mitchellh/packer/packer"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/mitchellh/multistep"
|
||||||
|
"github.com/mitchellh/packer/packer"
|
||||||
)
|
)
|
||||||
|
|
||||||
type StepExport struct {
|
type StepExport struct {
|
||||||
|
@@ -22,13 +23,14 @@ func (s *StepExport) generateArgs(c *Config, outputPath string, hidePassword boo
 	if hidePassword {
 		password = "****"
 	}
-	return []string{
+	args := []string{
 		"--noSSLVerify=true",
 		"--skipManifestCheck",
 		"-tt=" + s.Format,
 		"vi://" + c.RemoteUser + ":" + password + "@" + c.RemoteHost + "/" + c.VMName,
 		outputPath,
 	}
+	return append(c.OVFToolOptions, args...)
 }

 func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction {
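With the new ovftool_options setting, user-supplied ovftool flags are simply prepended to the fixed argument list before the export command is built. A tiny sketch of that composition with invented option values:

```go
package main

import "fmt"

func main() {
	userOptions := []string{"--compress=9", "--noImageFiles"} // from the template
	args := []string{
		"--noSSLVerify=true",
		"--skipManifestCheck",
		"-tt=ovf",
		"vi://user:****@esxi-host/example-vm",
		"output/",
	}
	// User options are prepended to the fixed arguments.
	full := append(userOptions, args...)
	fmt.Println(full)
}
```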
@ -92,15 +92,18 @@ func (c BuildCommand) Run(args []string) int {
|
||||||
Color: colors[i%len(colors)],
|
Color: colors[i%len(colors)],
|
||||||
Ui: ui,
|
Ui: ui,
|
||||||
}
|
}
|
||||||
|
if _, ok := c.Ui.(*packer.MachineReadableUi); !ok {
|
||||||
|
ui.Say(fmt.Sprintf("%s output will be in this color.", b))
|
||||||
|
if i+1 == len(buildNames) {
|
||||||
|
// Add a newline between the color output and the actual output
|
||||||
|
c.Ui.Say("")
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
buildUis[b] = ui
|
buildUis[b] = ui
|
||||||
ui.Say(fmt.Sprintf("%s output will be in this color.", b))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a newline between the color output and the actual output
|
|
||||||
c.Ui.Say("")
|
|
||||||
|
|
||||||
log.Printf("Build debug mode: %v", cfgDebug)
|
log.Printf("Build debug mode: %v", cfgDebug)
|
||||||
log.Printf("Force build: %v", cfgForce)
|
log.Printf("Force build: %v", cfgForce)
|
||||||
log.Printf("On error: %v", cfgOnError)
|
log.Printf("On error: %v", cfgOnError)
|
||||||
|
|
|
@ -15,6 +15,7 @@ import (
|
||||||
|
|
||||||
amazonchrootbuilder "github.com/mitchellh/packer/builder/amazon/chroot"
|
amazonchrootbuilder "github.com/mitchellh/packer/builder/amazon/chroot"
|
||||||
amazonebsbuilder "github.com/mitchellh/packer/builder/amazon/ebs"
|
amazonebsbuilder "github.com/mitchellh/packer/builder/amazon/ebs"
|
||||||
|
amazonebssurrogatebuilder "github.com/mitchellh/packer/builder/amazon/ebssurrogate"
|
||||||
amazonebsvolumebuilder "github.com/mitchellh/packer/builder/amazon/ebsvolume"
|
amazonebsvolumebuilder "github.com/mitchellh/packer/builder/amazon/ebsvolume"
|
||||||
amazoninstancebuilder "github.com/mitchellh/packer/builder/amazon/instance"
|
amazoninstancebuilder "github.com/mitchellh/packer/builder/amazon/instance"
|
||||||
azurearmbuilder "github.com/mitchellh/packer/builder/azure/arm"
|
azurearmbuilder "github.com/mitchellh/packer/builder/azure/arm"
|
||||||
|
@ -75,6 +76,7 @@ var Builders = map[string]packer.Builder{
|
||||||
"amazon-chroot": new(amazonchrootbuilder.Builder),
|
"amazon-chroot": new(amazonchrootbuilder.Builder),
|
||||||
"amazon-ebs": new(amazonebsbuilder.Builder),
|
"amazon-ebs": new(amazonebsbuilder.Builder),
|
||||||
"amazon-ebsvolume": new(amazonebsvolumebuilder.Builder),
|
"amazon-ebsvolume": new(amazonebsvolumebuilder.Builder),
|
||||||
|
"amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder),
|
||||||
"amazon-instance": new(amazoninstancebuilder.Builder),
|
"amazon-instance": new(amazoninstancebuilder.Builder),
|
||||||
"azure-arm": new(azurearmbuilder.Builder),
|
"azure-arm": new(azurearmbuilder.Builder),
|
||||||
"cloudstack": new(cloudstackbuilder.Builder),
|
"cloudstack": new(cloudstackbuilder.Builder),
|
||||||
|
|
|
@ -52,18 +52,16 @@ func (c *VersionCommand) Run(args []string) int {
|
||||||
// If we have a version check function, then let's check for
|
// If we have a version check function, then let's check for
|
||||||
// the latest version as well.
|
// the latest version as well.
|
||||||
if c.CheckFunc != nil {
|
if c.CheckFunc != nil {
|
||||||
// Separate the prior output with a newline
|
|
||||||
c.Ui.Say("")
|
|
||||||
|
|
||||||
// Check the latest version
|
// Check the latest version
|
||||||
info, err := c.CheckFunc()
|
info, err := c.CheckFunc()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Ui.Error(fmt.Sprintf(
|
c.Ui.Error(fmt.Sprintf(
|
||||||
"Error checking latest version: %s", err))
|
"\nError checking latest version: %s", err))
|
||||||
}
|
}
|
||||||
if info.Outdated {
|
if info.Outdated {
|
||||||
c.Ui.Say(fmt.Sprintf(
|
c.Ui.Say(fmt.Sprintf(
|
||||||
"Your version of Packer is out of date! The latest version\n"+
|
"\nYour version of Packer is out of date! The latest version\n"+
|
||||||
"is %s. You can update by downloading from www.packer.io",
|
"is %s. You can update by downloading from www.packer.io",
|
||||||
info.Latest))
|
info.Latest))
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
"github.com/mitchellh/packer/version"
|
"github.com/mitchellh/packer/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Commands is the mapping of all the available Terraform commands.
|
// Commands is the mapping of all the available Packer commands.
|
||||||
var Commands map[string]cli.CommandFactory
|
var Commands map[string]cli.CommandFactory
|
||||||
|
|
||||||
// CommandMeta is the Meta to use for the commands. This must be written
|
// CommandMeta is the Meta to use for the commands. This must be written
|
||||||
|
|
|
@ -114,6 +114,9 @@ func (s *StepCreateFloppy) Run(state multistep.StateBag) multistep.StepAction {
|
||||||
|
|
||||||
var crawlDirectoryFiles []string
|
var crawlDirectoryFiles []string
|
||||||
crawlDirectory := func(path string, info os.FileInfo, err error) error {
|
crawlDirectory := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !info.IsDir() {
|
if !info.IsDir() {
|
||||||
crawlDirectoryFiles = append(crawlDirectoryFiles, path)
|
crawlDirectoryFiles = append(crawlDirectoryFiles, path)
|
||||||
ui.Message(fmt.Sprintf("Adding file: %s", path))
|
ui.Message(fmt.Sprintf("Adding file: %s", path))
|
||||||
|
|
|
@ -21,6 +21,9 @@ const TestFixtures = "test-fixtures"
|
||||||
func getDirectory(path string) []string {
|
func getDirectory(path string) []string {
|
||||||
var result []string
|
var result []string
|
||||||
walk := func(path string, info os.FileInfo, err error) error {
|
walk := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if info.IsDir() && !strings.HasSuffix(path, "/") {
|
if info.IsDir() && !strings.HasSuffix(path, "/") {
|
||||||
path = path + "/"
|
path = path + "/"
|
||||||
}
|
}
|
||||||
|
|
|
@ -87,7 +87,7 @@ func TestUpload(t *testing.T) {
|
||||||
t.Fatalf("error creating communicator: %s", err)
|
t.Fatalf("error creating communicator: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something")), nil)
|
err = c.Upload("C:/Temp/packer.cmd", bytes.NewReader([]byte("something")), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("error uploading file: %s", err)
|
t.Fatalf("error uploading file: %s", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,6 +41,7 @@
|
||||||
"/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"
|
"/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"
|
||||||
],
|
],
|
||||||
"inline_shebang": "/bin/sh -x",
|
"inline_shebang": "/bin/sh -x",
|
||||||
|
"skip_clean": true,
|
||||||
"type": "shell"
|
"type": "shell"
|
||||||
}]
|
}]
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,10 +17,10 @@
|
||||||
"os_type": "Linux",
|
"os_type": "Linux",
|
||||||
"image_publisher": "Canonical",
|
"image_publisher": "Canonical",
|
||||||
"image_offer": "UbuntuServer",
|
"image_offer": "UbuntuServer",
|
||||||
"image_sku": "16.04.0-LTS",
|
"image_sku": "16.04-LTS",
|
||||||
|
|
||||||
"location": "West US",
|
"location": "West US",
|
||||||
"vm_size": "Standard_A2"
|
"vm_size": "Standard_DS1_v2"
|
||||||
}],
|
}],
|
||||||
"provisioners": [{
|
"provisioners": [{
|
||||||
"execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'",
|
"execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'",
|
||||||
|
|
|
@ -17,7 +17,6 @@
|
||||||
"subscription_id": "{{user `subscription_id`}}",
|
"subscription_id": "{{user `subscription_id`}}",
|
||||||
"object_id": "{{user `object_id`}}",
|
"object_id": "{{user `object_id`}}",
|
||||||
|
|
||||||
|
|
||||||
"capture_container_name": "images",
|
"capture_container_name": "images",
|
||||||
"capture_name_prefix": "packer",
|
"capture_name_prefix": "packer",
|
||||||
|
|
||||||
|
@ -34,15 +33,13 @@
|
||||||
|
|
||||||
"location": "West US",
|
"location": "West US",
|
||||||
"vm_size": "Standard_A2"
|
"vm_size": "Standard_A2"
|
||||||
}
|
}],
|
||||||
],
|
"provisioners": [{
|
||||||
"provisioners": [
|
|
||||||
{
|
|
||||||
"type": "powershell",
|
"type": "powershell",
|
||||||
"inline": [
|
"inline": [
|
||||||
"dir c:\\"
|
"if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}",
|
||||||
]
|
"& $Env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /shutdown /quiet"
|
||||||
}
|
|
||||||
]
|
]
|
||||||
|
}]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -34,7 +34,8 @@
|
||||||
"provisioners": [{
|
"provisioners": [{
|
||||||
"type": "powershell",
|
"type": "powershell",
|
||||||
"inline": [
|
"inline": [
|
||||||
"dir c:\\"
|
"if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}",
|
||||||
|
"& $Env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /shutdown /quiet"
|
||||||
]
|
]
|
||||||
}]
|
}]
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,6 +12,7 @@ import (
|
||||||
commonssh "github.com/mitchellh/packer/common/ssh"
|
commonssh "github.com/mitchellh/packer/common/ssh"
|
||||||
"github.com/mitchellh/packer/communicator/ssh"
|
"github.com/mitchellh/packer/communicator/ssh"
|
||||||
"github.com/mitchellh/packer/packer"
|
"github.com/mitchellh/packer/packer"
|
||||||
|
"github.com/xanzy/ssh-agent"
|
||||||
gossh "golang.org/x/crypto/ssh"
|
gossh "golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -94,6 +95,7 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru
|
||||||
|
|
||||||
conf, err := sshBastionConfig(s.Config)
|
conf, err := sshBastionConfig(s.Config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Printf("[ERROR] Error calling sshBastionConfig: %v", err)
|
||||||
return nil, fmt.Errorf("Error configuring bastion: %s", err)
|
return nil, fmt.Errorf("Error configuring bastion: %s", err)
|
||||||
}
|
}
|
||||||
bConf = conf
|
bConf = conf
|
||||||
|
@@ -196,7 +198,15 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru
 }

 func sshBastionConfig(config *Config) (*gossh.ClientConfig, error) {
-	auth := make([]gossh.AuthMethod, 0, 2)
+	var auth []gossh.AuthMethod
+
+	if !config.SSHDisableAgent {
+		log.Printf("[INFO] SSH agent forwarding enabled.")
+		if sshAgent := sshAgent(); sshAgent != nil {
+			auth = append(auth, sshAgent)
+		}
+	}
+
 	if config.SSHBastionPassword != "" {
 		auth = append(auth,
 			gossh.Password(config.SSHBastionPassword),

@@ -218,3 +228,19 @@ func sshBastionConfig(config *Config) (*gossh.ClientConfig, error) {
 		Auth: auth,
 	}, nil
 }
+
+func sshAgent() gossh.AuthMethod {
+	if !sshagent.Available() {
+		log.Println("[DEBUG] Error fetching SSH_AUTH_SOCK.")
+		return nil
+	}
+
+	agent, _, err := sshagent.New()
+	if err != nil {
+		log.Printf("[WARN] sshagent.New: %v", err)
+		return nil
+	}
+
+	log.Println("[INFO] Using SSH Agent.")
+	return gossh.PublicKeysCallback(agent.Signers)
+}
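The new sshAgent helper turns a running ssh-agent into a gossh.AuthMethod through the xanzy/ssh-agent wrapper. A rough sketch of the same idea using only the golang.org/x/crypto packages and talking to SSH_AUTH_SOCK directly (not the exact code above):

```go
package main

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// agentAuth returns an AuthMethod backed by the local ssh-agent,
// or nil if no agent socket is available.
func agentAuth() ssh.AuthMethod {
	sock := os.Getenv("SSH_AUTH_SOCK")
	if sock == "" {
		return nil
	}
	conn, err := net.Dial("unix", sock)
	if err != nil {
		return nil
	}
	return ssh.PublicKeysCallback(agent.NewClient(conn).Signers)
}

func main() {
	if auth := agentAuth(); auth != nil {
		cfg := &ssh.ClientConfig{User: "packer", Auth: []ssh.AuthMethod{auth}}
		fmt.Printf("bastion config with %d auth method(s)\n", len(cfg.Auth))
	} else {
		fmt.Println("no ssh-agent available")
	}
}
```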
@ -0,0 +1,151 @@
|
||||||
|
package communicator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// startAgent sets ssh-agent environment variables
|
||||||
|
func startAgent(t *testing.T) func() {
|
||||||
|
if testing.Short() {
|
||||||
|
// ssh-agent is not always available, and the key
|
||||||
|
// types supported vary by platform.
|
||||||
|
t.Skip("skipping test due to -short or availability")
|
||||||
|
}
|
||||||
|
|
||||||
|
bin, err := exec.LookPath("ssh-agent")
|
||||||
|
if err != nil {
|
||||||
|
t.Skip("could not find ssh-agent")
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command(bin, "-s")
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("cmd.Output: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Output looks like:
|
||||||
|
SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK;
|
||||||
|
SSH_AGENT_PID=15542; export SSH_AGENT_PID;
|
||||||
|
echo Agent pid 15542;
|
||||||
|
*/
|
||||||
|
fields := bytes.Split(out, []byte(";"))
|
||||||
|
line := bytes.SplitN(fields[0], []byte("="), 2)
|
||||||
|
line[0] = bytes.TrimLeft(line[0], "\n")
|
||||||
|
if string(line[0]) != "SSH_AUTH_SOCK" {
|
||||||
|
t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0])
|
||||||
|
}
|
||||||
|
socket := string(line[1])
|
||||||
|
t.Logf("Socket value: %v", socket)
|
||||||
|
|
||||||
|
origSocket := os.Getenv("SSH_AUTH_SOCK")
|
||||||
|
if err := os.Setenv("SSH_AUTH_SOCK", socket); err != nil {
|
||||||
|
t.Fatalf("could not set SSH_AUTH_SOCK environment variable: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
line = bytes.SplitN(fields[2], []byte("="), 2)
|
||||||
|
line[0] = bytes.TrimLeft(line[0], "\n")
|
||||||
|
if string(line[0]) != "SSH_AGENT_PID" {
|
||||||
|
t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2])
|
||||||
|
}
|
||||||
|
pidStr := line[1]
|
||||||
|
t.Logf("Agent PID: %v", string(pidStr))
|
||||||
|
pid, err := strconv.Atoi(string(pidStr))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Atoi(%q): %v", pidStr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
proc, _ := os.FindProcess(pid)
|
||||||
|
if proc != nil {
|
||||||
|
proc.Kill()
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Setenv("SSH_AUTH_SOCK", origSocket)
|
||||||
|
os.RemoveAll(filepath.Dir(socket))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSSHAgent(t *testing.T) {
|
||||||
|
cleanup := startAgent(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
if auth := sshAgent(); auth == nil {
|
||||||
|
t.Error("Want `ssh.AuthMethod`, got `nil`")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSSHBastionConfig(t *testing.T) {
|
||||||
|
pemPath := TestPEM(t)
|
||||||
|
tests := []struct {
|
||||||
|
in *Config
|
||||||
|
errStr string
|
||||||
|
want int
|
||||||
|
fn func() func()
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
in: &Config{SSHDisableAgent: true},
|
||||||
|
want: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
in: &Config{SSHDisableAgent: false},
|
||||||
|
want: 0,
|
||||||
|
fn: func() func() {
|
||||||
|
cleanup := startAgent(t)
|
||||||
|
os.Unsetenv("SSH_AUTH_SOCK")
|
||||||
|
return cleanup
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
in: &Config{
|
||||||
|
SSHDisableAgent: false,
|
||||||
|
SSHBastionPassword: "foobar",
|
||||||
|
SSHBastionPrivateKey: pemPath,
|
||||||
|
},
|
||||||
|
want: 4,
|
||||||
|
fn: func() func() {
|
||||||
|
cleanup := startAgent(t)
|
||||||
|
return cleanup
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
in: &Config{
|
||||||
|
SSHBastionPrivateKey: pemPath,
|
||||||
|
},
|
||||||
|
want: 0,
|
||||||
|
errStr: "Failed to read key '" + pemPath + "': no key found",
|
||||||
|
fn: func() func() {
|
||||||
|
os.Truncate(pemPath, 0)
|
||||||
|
return func() {
|
||||||
|
if err := os.Remove(pemPath); err != nil {
|
||||||
|
t.Fatalf("os.Remove: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range tests {
|
||||||
|
func() {
|
||||||
|
if c.fn != nil {
|
||||||
|
defered := c.fn()
|
||||||
|
defer defered()
|
||||||
|
}
|
||||||
|
bConf, err := sshBastionConfig(c.in)
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != c.errStr {
|
||||||
|
t.Errorf("want error %v, got %q", c.errStr, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(bConf.Auth) != c.want {
|
||||||
|
t.Errorf("want %v ssh.AuthMethod, got %v ssh.AuthMethod", c.want, len(bConf.Auth))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
packer/ui.go (14 changes)
@@ -1,6 +1,7 @@
 package packer

 import (
+	"bufio"
 	"bytes"
 	"errors"
 	"fmt"

@@ -64,6 +65,7 @@ type BasicUi struct {
 	ErrorWriter io.Writer
 	l           sync.Mutex
 	interrupted bool
+	scanner     *bufio.Scanner
 }

 // MachineReadableUi is a UI that only outputs machine-readable output

@@ -174,6 +176,9 @@ func (rw *BasicUi) Ask(query string) (string, error) {
 		return "", errors.New("interrupted")
 	}

+	if rw.scanner == nil {
+		rw.scanner = bufio.NewScanner(rw.Reader)
+	}
 	sigCh := make(chan os.Signal, 1)
 	signal.Notify(sigCh, os.Interrupt)
 	defer signal.Stop(sigCh)

@@ -188,10 +193,13 @@ func (rw *BasicUi) Ask(query string) (string, error) {
 	result := make(chan string, 1)
 	go func() {
 		var line string
-		if _, err := fmt.Fscanln(rw.Reader, &line); err != nil {
-			log.Printf("ui: scan err: %s", err)
+		if rw.scanner.Scan() {
+			line = rw.scanner.Text()
+		}
+		if err := rw.scanner.Err(); err != nil {
+			log.Printf("ui: scan err: %s", err)
+			return
 		}

 		result <- line
 	}()

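The Ask fix swaps fmt.Fscanln for a persistent bufio.Scanner, so an empty answer (just pressing Enter) is no longer treated as an error and buffered input is not lost between prompts. A minimal standalone illustration of the difference, reading from stdin:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	scanner := bufio.NewScanner(os.Stdin)

	fmt.Print("[c]ontinue or [a]bort: ")
	var answer string
	if scanner.Scan() {
		// An empty line is a valid (empty) answer here, whereas
		// fmt.Fscanln fails on it with "unexpected newline".
		answer = scanner.Text()
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "scan err:", err)
		return
	}
	fmt.Printf("got %q\n", answer)
}
```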
@ -16,6 +16,12 @@ func readWriter(ui *BasicUi) (result string) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reset the input Reader than add some input to it.
|
||||||
|
func writeReader(ui *BasicUi, input string) {
|
||||||
|
buffer := ui.Reader.(*bytes.Buffer)
|
||||||
|
buffer.WriteString(input)
|
||||||
|
}
|
||||||
|
|
||||||
func readErrorWriter(ui *BasicUi) (result string) {
|
func readErrorWriter(ui *BasicUi) (result string) {
|
||||||
buffer := ui.ErrorWriter.(*bytes.Buffer)
|
buffer := ui.ErrorWriter.(*bytes.Buffer)
|
||||||
result = buffer.String()
|
result = buffer.String()
|
||||||
|
@ -192,6 +198,45 @@ func TestBasicUi_Say(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBasicUi_Ask(t *testing.T) {
|
||||||
|
|
||||||
|
var actual, expected string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
var testCases = []struct {
|
||||||
|
Prompt, Input, Answer string
|
||||||
|
}{
|
||||||
|
{"[c]ontinue or [a]bort", "c\n", "c"},
|
||||||
|
{"[c]ontinue or [a]bort", "c", "c"},
|
||||||
|
// Empty input shouldn't give an error
|
||||||
|
{"Name", "Joe Bloggs\n", "Joe Bloggs"},
|
||||||
|
{"Name", "Joe Bloggs", "Joe Bloggs"},
|
||||||
|
{"Name", "\n", ""},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
// Because of the internal bufio we can't easily reset the input, so create a new one each time
|
||||||
|
bufferUi := testUi()
|
||||||
|
writeReader(bufferUi, testCase.Input)
|
||||||
|
|
||||||
|
actual, err = bufferUi.Ask(testCase.Prompt)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if actual != testCase.Answer {
|
||||||
|
t.Fatalf("bad answer: %#v", actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
actual = readWriter(bufferUi)
|
||||||
|
expected = testCase.Prompt + " "
|
||||||
|
if actual != expected {
|
||||||
|
t.Fatalf("bad prompt: %#v", actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func TestMachineReadableUi_ImplUi(t *testing.T) {
|
func TestMachineReadableUi_ImplUi(t *testing.T) {
|
||||||
var raw interface{}
|
var raw interface{}
|
||||||
raw = &MachineReadableUi{}
|
raw = &MachineReadableUi{}
|
||||||
|
|
|
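The switch above from fmt.Fscanln to a shared bufio.Scanner is what lets the empty-input test cases pass: Fscanln reports an error on a bare newline, while Scanner just yields an empty line. A minimal standalone sketch of the difference (illustrative only, not Packer code):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// fmt.Fscanln returns an error ("unexpected newline") for empty input.
	var line string
	_, err := fmt.Fscanln(strings.NewReader("\n"), &line)
	fmt.Println("Fscanln err:", err)

	// bufio.Scanner treats the same input as a single empty line, no error.
	s := bufio.NewScanner(strings.NewReader("\n"))
	fmt.Println("Scan ok:", s.Scan(), "text:", s.Text(), "err:", s.Err())
}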
@@ -31,6 +31,9 @@ type Config struct {
 	SkipClean   bool              `mapstructure:"skip_clean"`
 	Tags        map[string]string `mapstructure:"tags"`
 	Name        string            `mapstructure:"ami_name"`
+	Description string            `mapstructure:"ami_description"`
+	Users       []string          `mapstructure:"ami_users"`
+	Groups      []string          `mapstructure:"ami_groups"`
 
 	ctx interpolate.Context
 }
@@ -304,6 +307,60 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 	}
 
+	// Apply attributes for AMI specified in config
+	// (duped from builder/amazon/common/step_modify_ami_attributes.go)
+	options := make(map[string]*ec2.ModifyImageAttributeInput)
+	if p.config.Description != "" {
+		options["description"] = &ec2.ModifyImageAttributeInput{
+			Description: &ec2.AttributeValue{Value: &p.config.Description},
+		}
+	}
+
+	if len(p.config.Groups) > 0 {
+		groups := make([]*string, len(p.config.Groups))
+		adds := make([]*ec2.LaunchPermission, len(p.config.Groups))
+		addGroups := &ec2.ModifyImageAttributeInput{
+			LaunchPermission: &ec2.LaunchPermissionModifications{},
+		}
+
+		for i, g := range p.config.Groups {
+			groups[i] = aws.String(g)
+			adds[i] = &ec2.LaunchPermission{
+				Group: aws.String(g),
+			}
+		}
+		addGroups.UserGroups = groups
+		addGroups.LaunchPermission.Add = adds
+
+		options["groups"] = addGroups
+	}
+
+	if len(p.config.Users) > 0 {
+		users := make([]*string, len(p.config.Users))
+		adds := make([]*ec2.LaunchPermission, len(p.config.Users))
+		for i, u := range p.config.Users {
+			users[i] = aws.String(u)
+			adds[i] = &ec2.LaunchPermission{UserId: aws.String(u)}
+		}
+		options["users"] = &ec2.ModifyImageAttributeInput{
+			UserIds: users,
+			LaunchPermission: &ec2.LaunchPermissionModifications{
+				Add: adds,
+			},
+		}
+	}
+
+	if len(options) > 0 {
+		for name, input := range options {
+			ui.Message(fmt.Sprintf("Modifying: %s", name))
+			input.ImageId = &createdami
+			_, err := ec2conn.ModifyImageAttribute(input)
+			if err != nil {
+				return nil, false, fmt.Errorf("Error modifying AMI attributes: %s", err)
+			}
+		}
+	}
+
 	// Add the reported AMI ID to the artifact list
 	log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region)
 	artifact = &awscommon.Artifact{
@@ -110,9 +110,10 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 		return nil, false, fmt.Errorf("VMX, OVF or OVA file not found")
 	}
 
+	password := url.QueryEscape(p.config.Password)
 	ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s",
 		url.QueryEscape(p.config.Username),
-		url.QueryEscape(p.config.Password),
+		password,
 		p.config.Host,
 		p.config.Datacenter,
 		p.config.Cluster)
@@ -128,7 +129,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 
 	ui.Message(fmt.Sprintf("Uploading %s to vSphere", source))
 
-	log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " "))
+	log.Printf("Starting ovftool with parameters: %s",
+		strings.Replace(
+			strings.Join(args, " "),
+			password,
+			"<password>",
+			-1))
 	cmd := exec.Command("ovftool", args...)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/mitchellh/packer/common"
+	"github.com/mitchellh/packer/common/uuid"
 	"github.com/mitchellh/packer/helper/config"
 	"github.com/mitchellh/packer/packer"
 	"github.com/mitchellh/packer/template/interpolate"
@@ -85,7 +86,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
 	}
 
 	if p.config.StagingDir == "" {
-		p.config.StagingDir = DefaultStagingDir
+		p.config.StagingDir = filepath.Join(DefaultStagingDir, uuid.TimeOrderedUUID())
 	}
 
 	// Validation
@@ -1,10 +1,13 @@
 package ansiblelocal
 
 import (
-	"github.com/mitchellh/packer/packer"
 	"io/ioutil"
 	"os"
+	"path/filepath"
+	"strings"
 	"testing"
+
+	"github.com/mitchellh/packer/packer"
 )
 
 func testConfig() map[string]interface{} {
@@ -36,7 +39,7 @@ func TestProvisionerPrepare_Defaults(t *testing.T) {
 		t.Fatalf("err: %s", err)
 	}
 
-	if p.config.StagingDir != DefaultStagingDir {
+	if !strings.HasPrefix(filepath.ToSlash(p.config.StagingDir), DefaultStagingDir) {
 		t.Fatalf("unexpected staging dir %s, expected %s",
 			p.config.StagingDir, DefaultStagingDir)
 	}
@@ -280,20 +280,25 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
 
 	err = p.executeChef(ui, comm, configPath, jsonPath)
 
-	knifeConfigPath, err2 := p.createKnifeConfig(
-		ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode)
-	if err2 != nil {
-		return fmt.Errorf("Error creating knife config on node: %s", err2)
-	}
-	if !p.config.SkipCleanNode {
-		if err2 := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err2 != nil {
-			return fmt.Errorf("Error cleaning up chef node: %s", err2)
-		}
-	}
-	if !p.config.SkipCleanClient {
-		if err2 := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err2 != nil {
-			return fmt.Errorf("Error cleaning up chef client: %s", err2)
+	if !(p.config.SkipCleanNode && p.config.SkipCleanClient) {
+		knifeConfigPath, knifeErr := p.createKnifeConfig(
+			ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode)
+		if knifeErr != nil {
+			return fmt.Errorf("Error creating knife config on node: %s", knifeErr)
+		}
+		if !p.config.SkipCleanNode {
+			if err := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err != nil {
+				return fmt.Errorf("Error cleaning up chef node: %s", err)
+			}
+		}
+		if !p.config.SkipCleanClient {
+			if err := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err != nil {
+				return fmt.Errorf("Error cleaning up chef client: %s", err)
+			}
 		}
 	}
 
@@ -14,7 +14,7 @@ type elevatedOptions struct {
 
 var elevatedTemplate = template.Must(template.New("ElevatedCommand").Parse(`
 $name = "{{.TaskName}}"
-$log = "$env:TEMP\$name.out"
+$log = "$env:SystemRoot\Temp\$name.out"
 $s = New-Object -ComObject "Schedule.Service"
 $s.Connect()
 $t = $s.NewTask($null)
@@ -53,7 +53,7 @@ $t.XmlText = @'
 <Actions Context="Author">
 <Exec>
 <Command>cmd</Command>
-<Arguments>/c powershell.exe -EncodedCommand {{.EncodedCommand}} > %TEMP%\{{.TaskName}}.out 2>&1</Arguments>
+<Arguments>/c powershell.exe -EncodedCommand {{.EncodedCommand}} > %SYSTEMROOT%\Temp\{{.TaskName}}.out 2>&1</Arguments>
 </Exec>
 </Actions>
 </Task>
@@ -81,5 +81,8 @@ do {
 }
 } while (!($t.state -eq 3))
 $result = $t.LastTaskResult
+if (Test-Path $log) {
+    Remove-Item $log -Force -ErrorAction SilentlyContinue | Out-Null
+}
 [System.Runtime.Interopservices.Marshal]::ReleaseComObject($s) | Out-Null
 exit $result`))
@@ -32,11 +32,11 @@ func TestProvisionerPrepare_Defaults(t *testing.T) {
 	}
 
 	if p.config.RestartTimeout != 5*time.Minute {
-		t.Errorf("unexpected remote path: %s", p.config.RestartTimeout)
+		t.Errorf("unexpected restart timeout: %s", p.config.RestartTimeout)
 	}
 
 	if p.config.RestartCommand != "shutdown /r /f /t 0 /c \"packer restart\"" {
-		t.Errorf("unexpected remote path: %s", p.config.RestartCommand)
+		t.Errorf("unexpected restart command: %s", p.config.RestartCommand)
 	}
 }
 
@@ -51,7 +51,7 @@ func TestProvisionerPrepare_ConfigRetryTimeout(t *testing.T) {
 	}
 
 	if p.config.RestartTimeout != 1*time.Minute {
-		t.Errorf("unexpected remote path: %s", p.config.RestartTimeout)
+		t.Errorf("unexpected restart timeout: %s", p.config.RestartTimeout)
 	}
 }
 
@@ -138,12 +138,12 @@ func funcGenTimestamp(ctx *Context) interface{} {
 }
 
 func funcGenUser(ctx *Context) interface{} {
-	return func(k string) string {
+	return func(k string) (string, error) {
 		if ctx == nil || ctx.UserVariables == nil {
-			return ""
+			return "", errors.New("test")
 		}
 
-		return ctx.UserVariables[k]
+		return ctx.UserVariables[k], nil
 	}
 }
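The new (string, error) signature above matters because Go's text/template treats a trailing error return from a template function as an execution failure instead of silently substituting an empty string. A minimal sketch of that mechanism, with illustrative names that are not Packer's actual API:

package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// A template function whose last return value is error: a non-nil
		// error aborts Execute instead of rendering "".
		"user": func(k string) (string, error) {
			vars := map[string]string{"region": "us-east-1"}
			return vars[k], nil
		},
	}
	t := template.Must(template.New("t").Funcs(funcs).Parse(`{{user "region"}}`))
	_ = t.Execute(os.Stdout, nil)
}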
@@ -41,6 +41,7 @@ type clientRequest struct {
 
 func (c *clientRequest) Transport(endpoint *Endpoint) error {
 	transport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
 		TLSClientConfig: &tls.Config{
 			InsecureSkipVerify: endpoint.Insecure,
 		},
@@ -0,0 +1,202 @@
[standard Apache License, Version 2.0 text, added verbatim as a vendored LICENSE file]
@@ -0,0 +1,23 @@
# ssh-agent

Create a new [agent.Agent](https://godoc.org/golang.org/x/crypto/ssh/agent#Agent) on any type of OS (so including Windows) from any [Go](https://golang.org) application.

## Limitations

When compiled for Windows, it will only support [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant) as the SSH authentication agent.

## Credits

Big thanks to [Давид Мзареулян (David Mzareulyan)](https://github.com/davidmz) for creating the [go-pageant](https://github.com/davidmz/go-pageant) package!

## Issues

If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ssh-agent/issues)

## Author

Sander van Harmelen (<sander@xanzy.io>)

## License

The files `pageant_windows.go` and `sshagent_windows.go` have their own license (see file headers). The rest of this package is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
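For orientation, a minimal sketch of how a caller could consume this vendored package, turning the running agent (unix socket or Pageant) into an ssh.AuthMethod. The import path is assumed from the README's issue-tracker URL, and the user name is a placeholder:

package main

import (
	"log"

	sshagent "github.com/xanzy/ssh-agent" // assumed import path; exposes New() and Available()
	"golang.org/x/crypto/ssh"
)

func main() {
	if !sshagent.Available() {
		log.Fatal("no SSH agent available")
	}
	agentClient, _, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User: "packer",
		Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(agentClient.Signers)},
	}
	_ = config // in real use, pass to ssh.Dial("tcp", "host:22", config)
}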
@@ -0,0 +1,146 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//

// +build windows

package sshagent

// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py

import (
	"encoding/binary"
	"errors"
	"fmt"
	"sync"
	. "syscall"
	. "unsafe"
)

// Maximum size of message that can be sent to pageant
const MaxMessageLen = 8192

var (
	ErrPageantNotFound = errors.New("pageant process not found")
	ErrSendMessage     = errors.New("error sending message")

	ErrMessageTooLong       = errors.New("message too long")
	ErrInvalidMessageFormat = errors.New("invalid message format")
	ErrResponseTooLong      = errors.New("response too long")
)

const (
	agentCopydataID = 0x804e50ba
	wmCopydata      = 74
)

type copyData struct {
	dwData uintptr
	cbData uint32
	lpData Pointer
}

var (
	lock sync.Mutex

	winFindWindow         = winAPI("user32.dll", "FindWindowW")
	winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId")
	winSendMessage        = winAPI("user32.dll", "SendMessageW")
)

func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
	proc := MustLoadDLL(dllName).MustFindProc(funcName)
	return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
}

// Available returns true if Pageant is running
func Available() bool { return pageantWindow() != 0 }

// Query sends message msg to Pageant and returns response or error.
// 'msg' is raw agent request with length prefix
// Response is raw agent response with length prefix
func query(msg []byte) ([]byte, error) {
	if len(msg) > MaxMessageLen {
		return nil, ErrMessageTooLong
	}

	msgLen := binary.BigEndian.Uint32(msg[:4])
	if len(msg) != int(msgLen)+4 {
		return nil, ErrInvalidMessageFormat
	}

	lock.Lock()
	defer lock.Unlock()

	paWin := pageantWindow()

	if paWin == 0 {
		return nil, ErrPageantNotFound
	}

	thID, _, _ := winGetCurrentThreadID()
	mapName := fmt.Sprintf("PageantRequest%08x", thID)
	pMapName, _ := UTF16PtrFromString(mapName)

	mmap, err := CreateFileMapping(InvalidHandle, nil, PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
	if err != nil {
		return nil, err
	}
	defer CloseHandle(mmap)

	ptr, err := MapViewOfFile(mmap, FILE_MAP_WRITE, 0, 0, 0)
	if err != nil {
		return nil, err
	}
	defer UnmapViewOfFile(ptr)

	mmSlice := (*(*[MaxMessageLen]byte)(Pointer(ptr)))[:]

	copy(mmSlice, msg)

	mapNameBytesZ := append([]byte(mapName), 0)

	cds := copyData{
		dwData: agentCopydataID,
		cbData: uint32(len(mapNameBytesZ)),
		lpData: Pointer(&(mapNameBytesZ[0])),
	}

	resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(Pointer(&cds)))

	if resp == 0 {
		return nil, ErrSendMessage
	}

	respLen := binary.BigEndian.Uint32(mmSlice[:4])
	if respLen > MaxMessageLen-4 {
		return nil, ErrResponseTooLong
	}

	respData := make([]byte, respLen+4)
	copy(respData, mmSlice)

	return respData, nil
}

func pageantWindow() uintptr {
	nameP, _ := UTF16PtrFromString("Pageant")
	h, _, _ := winFindWindow(uintptr(Pointer(nameP)), uintptr(Pointer(nameP)))
	return h
}
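A short in-package sketch of the framing query() expects: a 4-byte big-endian length prefix followed by the raw agent request. The helper name below is hypothetical; message type 11 is the standard SSH2_AGENTC_REQUEST_IDENTITIES request from the ssh-agent protocol, and this only does anything useful on Windows with Pageant running.

// +build windows

package sshagent

// listIdentitiesRaw is an illustrative helper, not part of the vendored file.
func listIdentitiesRaw() ([]byte, error) {
	req := []byte{0, 0, 0, 1, 11} // length = 1, body = {SSH2_AGENTC_REQUEST_IDENTITIES}
	return query(req)             // raw, length-prefixed agent response
}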
@@ -0,0 +1,49 @@
//
// Copyright 2015, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// +build !windows

package sshagent

import (
	"errors"
	"fmt"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

// New returns a new agent.Agent that uses a unix socket
func New() (agent.Agent, net.Conn, error) {
	if !Available() {
		return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not-specified")
	}

	sshAuthSock := os.Getenv("SSH_AUTH_SOCK")

	conn, err := net.Dial("unix", sshAuthSock)
	if err != nil {
		return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
	}

	return agent.NewClient(conn), conn, nil
}

// Available returns true if an auth socket is defined
func Available() bool {
	return os.Getenv("SSH_AUTH_SOCK") != ""
}
@@ -0,0 +1,80 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//

// +build windows

package sshagent

import (
	"errors"
	"io"
	"net"
	"sync"

	"golang.org/x/crypto/ssh/agent"
)

// New returns a new agent.Agent and the (custom) connection it uses
// to communicate with a running pageant.exe instance (see README.md)
func New() (agent.Agent, net.Conn, error) {
	if !Available() {
		return nil, nil, errors.New("SSH agent requested but Pageant not running")
	}

	return agent.NewClient(&conn{}), nil, nil
}

type conn struct {
	sync.Mutex
	buf []byte
}

func (c *conn) Close() {
	c.Lock()
	defer c.Unlock()
	c.buf = nil
}

func (c *conn) Write(p []byte) (int, error) {
	c.Lock()
	defer c.Unlock()

	resp, err := query(p)
	if err != nil {
		return 0, err
	}

	c.buf = append(c.buf, resp...)

	return len(p), nil
}

func (c *conn) Read(p []byte) (int, error) {
	c.Lock()
	defer c.Unlock()

	if len(c.buf) == 0 {
		return 0, io.EOF
	}

	n := copy(p, c.buf)
	c.buf = c.buf[n:]

	return n, nil
}
@@ -461,8 +461,8 @@ func (m *mux) newChannel(chanType string, direction channelDirection, extraData
 		pending:          newBuffer(),
 		extPending:       newBuffer(),
 		direction:        direction,
-		incomingRequests: make(chan *Request, 16),
-		msg:              make(chan interface{}, 16),
+		incomingRequests: make(chan *Request, chanSize),
+		msg:              make(chan interface{}, chanSize),
 		chanType:         chanType,
 		extraData:        extraData,
 		mux:              m,
@@ -135,6 +135,7 @@ const prefixLen = 5
 type streamPacketCipher struct {
 	mac    hash.Hash
 	cipher cipher.Stream
+	etm    bool
 
 	// The following members are to avoid per-packet allocations.
 	prefix      [prefixLen]byte
@@ -150,7 +151,14 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 		return nil, err
 	}
 
-	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	var encryptedPaddingLength [1]byte
+	if s.mac != nil && s.etm {
+		copy(encryptedPaddingLength[:], s.prefix[4:5])
+		s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+	} else {
+		s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	}
+
 	length := binary.BigEndian.Uint32(s.prefix[0:4])
 	paddingLength := uint32(s.prefix[4])
 
@@ -159,7 +167,12 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 		s.mac.Reset()
 		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
 		s.mac.Write(s.seqNumBytes[:])
-		s.mac.Write(s.prefix[:])
+		if s.etm {
+			s.mac.Write(s.prefix[:4])
+			s.mac.Write(encryptedPaddingLength[:])
+		} else {
+			s.mac.Write(s.prefix[:])
+		}
 		macSize = uint32(s.mac.Size())
 	}
 
@@ -184,10 +197,17 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 	}
 	mac := s.packetData[length-1:]
 	data := s.packetData[:length-1]
+
+	if s.mac != nil && s.etm {
+		s.mac.Write(data)
+	}
+
 	s.cipher.XORKeyStream(data, data)
 
 	if s.mac != nil {
+		if !s.etm {
 			s.mac.Write(data)
+		}
 		s.macResult = s.mac.Sum(s.macResult[:0])
 		if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
 			return nil, errors.New("ssh: MAC failure")
@@ -203,7 +223,13 @@ func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Rea
 		return errors.New("ssh: packet too large")
 	}
 
-	paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
+	aadlen := 0
+	if s.mac != nil && s.etm {
+		// packet length is not encrypted for EtM modes
+		aadlen = 4
+	}
+
+	paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
 	if paddingLength < 4 {
 		paddingLength += packetSizeMultiple
 	}
@@ -220,15 +246,37 @@ func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Rea
 		s.mac.Reset()
 		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
 		s.mac.Write(s.seqNumBytes[:])
+
+		if s.etm {
+			// For EtM algorithms, the packet length must stay unencrypted,
+			// but the following data (padding length) must be encrypted
+			s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+		}
+
 		s.mac.Write(s.prefix[:])
+
+		if !s.etm {
+			// For non-EtM algorithms, the algorithm is applied on unencrypted data
 			s.mac.Write(packet)
 			s.mac.Write(padding)
 		}
+	}
+
+	if !(s.mac != nil && s.etm) {
+		// For EtM algorithms, the padding length has already been encrypted
+		// and the packet length must remain unencrypted
 		s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	}
+
 	s.cipher.XORKeyStream(packet, packet)
 	s.cipher.XORKeyStream(padding, padding)
+
+	if s.mac != nil && s.etm {
+		// For EtM algorithms, packet and padding must be encrypted
+		s.mac.Write(packet)
+		s.mac.Write(padding)
+	}
+
 	if _, err := w.Write(s.prefix[:]); err != nil {
 		return err
 	}
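The changes above implement encrypt-then-MAC ("*-etm@openssh.com") framing: the 4-byte packet length stays in the clear, the rest of the packet is encrypted, and the MAC is computed over the sequence number, the cleartext length, and the ciphertext. A self-contained sketch of that ordering follows; the keys, IV and the omission of padding are stand-ins for brevity, not the library's actual kex-derived material:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// sealEtM is an illustrative helper, not the x/crypto/ssh API.
func sealEtM(seqNum uint32, payload, encKey, iv, macKey []byte) []byte {
	block, _ := aes.NewCipher(encKey)
	stream := cipher.NewCTR(block, iv)

	body := make([]byte, len(payload))
	stream.XORKeyStream(body, payload) // encrypt payload first (padding omitted)

	pkt := make([]byte, 4+len(body))
	binary.BigEndian.PutUint32(pkt[:4], uint32(len(body))) // length stays unencrypted
	copy(pkt[4:], body)

	mac := hmac.New(sha256.New, macKey)
	var seq [4]byte
	binary.BigEndian.PutUint32(seq[:], seqNum)
	mac.Write(seq[:])
	mac.Write(pkt) // MAC over cleartext length + ciphertext
	return append(pkt, mac.Sum(nil)...)
}

func main() {
	encKey := make([]byte, 16)
	iv := make([]byte, 16)
	fmt.Printf("%x\n", sealEtM(3, []byte("hello"), encKey, iv, []byte("mac-key")))
}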
@@ -40,7 +40,7 @@ func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
 		return nil
 	}
 
-	ch = make(chan NewChannel, 16)
+	ch = make(chan NewChannel, chanSize)
 	c.channelHandlers[channelType] = ch
 	return ch
 }
@@ -97,13 +97,11 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
 	c.transport = newClientTransport(
 		newTransport(c.sshConn.conn, config.Rand, true /* is client */),
 		c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
-	if err := c.transport.requestInitialKeyChange(); err != nil {
+	if err := c.transport.waitSession(); err != nil {
 		return err
 	}
 
-	// We just did the key change, so the session ID is established.
 	c.sessionID = c.transport.getSessionID()
 
 	return c.clientAuthenticate(config)
 }
@@ -30,8 +30,10 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
 	// then any untried methods suggested by the server.
 	tried := make(map[string]bool)
 	var lastMethods []string
+
+	sessionID := c.transport.getSessionID()
 	for auth := AuthMethod(new(noneAuth)); auth != nil; {
-		ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand)
+		ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
 		if err != nil {
 			return err
 		}
@@ -56,7 +56,7 @@ var supportedHostKeyAlgos = []string{
 // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
 // because they have reached the end of their useful life.
 var supportedMACs = []string{
-	"hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+	"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
 }
 
 var supportedCompressions = []string{compressionNone}
@@ -104,6 +104,21 @@ type directionAlgorithms struct {
 	Compression string
 }
 
+// rekeyBytes returns the rekeying interval in bytes.
+func (a *directionAlgorithms) rekeyBytes() int64 {
+	// According to RFC4344 block ciphers should rekey after
+	// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
+	// 128.
+	switch a.Cipher {
+	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
+		return 16 * (1 << 32)
+	}
+
+	// For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
+	return 1 << 30
+}
+
 type algorithms struct {
 	kex     string
 	hostKey string
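A quick check of the thresholds encoded in rekeyBytes above: RFC 4344 suggests rekeying a block cipher after 2^(L/4) blocks, so with L = 128 bits that is 2^32 blocks of 16 bytes each, while the RFC 4253 fallback is 1 GiB. A throwaway calculation, not library code:

package main

import "fmt"

func main() {
	aesLimit := int64(16) * (1 << 32) // 68719476736 bytes, i.e. 64 GiB for AES variants
	otherLimit := int64(1) << 30      // 1 GiB fallback from RFC 4253
	fmt.Println(aesLimit, otherLimit)
}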
@@ -19,6 +19,11 @@ import (
 // messages are wrong when using ECDH.
 const debugHandshake = false
 
+// chanSize sets the amount of buffering SSH connections. This is
+// primarily for testing: setting chanSize=0 uncovers deadlocks more
+// quickly.
+const chanSize = 16
+
 // keyingTransport is a packet based transport that supports key
 // changes. It need not be thread-safe. It should pass through
 // msgNewKeys in both directions.
@@ -53,34 +58,58 @@ type handshakeTransport struct {
 	incoming  chan []byte
 	readError error
 
+	mu             sync.Mutex
+	writeError     error
+	sentInitPacket []byte
+	sentInitMsg    *kexInitMsg
+	pendingPackets [][]byte // Used when a key exchange is in progress.
+
+	// If the read loop wants to schedule a kex, it pings this
+	// channel, and the write loop will send out a kex
+	// message.
+	requestKex chan struct{}
+
+	// If the other side requests or confirms a kex, its kexInit
+	// packet is sent here for the write loop to find it.
+	startKex chan *pendingKex
+
 	// data for host key checking
 	hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
 	dialAddress     string
 	remoteAddr      net.Addr
 
-	readSinceKex uint64
+	// Algorithms agreed in the last key exchange.
+	algorithms *algorithms
 
-	// Protects the writing side of the connection
-	mu              sync.Mutex
-	cond            *sync.Cond
-	sentInitPacket  []byte
-	sentInitMsg     *kexInitMsg
-	writtenSinceKex uint64
-	writeError      error
+	readPacketsLeft uint32
+	readBytesLeft   int64
+
+	writePacketsLeft uint32
+	writeBytesLeft   int64
 
 	// The session ID or nil if first kex did not complete yet.
 	sessionID []byte
 }
 
+type pendingKex struct {
+	otherInit []byte
+	done      chan error
+}
+
 func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
 	t := &handshakeTransport{
 		conn:          conn,
 		serverVersion: serverVersion,
 		clientVersion: clientVersion,
-		incoming:      make(chan []byte, 16),
+		incoming:      make(chan []byte, chanSize),
+		requestKex:    make(chan struct{}, 1),
+		startKex:      make(chan *pendingKex, 1),
+
 		config:        config,
 	}
-	t.cond = sync.NewCond(&t.mu)
+
+	// We always start with a mandatory key exchange.
+	t.requestKex <- struct{}{}
 	return t
 }
@@ -95,6 +124,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt
 		t.hostKeyAlgorithms = supportedHostKeyAlgos
 	}
 	go t.readLoop()
+	go t.kexLoop()
 	return t
 }
@@ -102,6 +132,7 @@ func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byt
 	t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
 	t.hostKeys = config.hostKeys
 	go t.readLoop()
+	go t.kexLoop()
 	return t
 }
@@ -109,6 +140,20 @@ func (t *handshakeTransport) getSessionID() []byte {
 	return t.sessionID
 }
 
+// waitSession waits for the session to be established. This should be
+// the first thing to call after instantiating handshakeTransport.
+func (t *handshakeTransport) waitSession() error {
+	p, err := t.readPacket()
+	if err != nil {
+		return err
+	}
+	if p[0] != msgNewKeys {
+		return fmt.Errorf("ssh: first packet should be msgNewKeys")
+	}
+
+	return nil
+}
+
 func (t *handshakeTransport) id() string {
 	if len(t.hostKeys) > 0 {
 		return "server"
@@ -116,6 +161,20 @@ func (t *handshakeTransport) id() string {
 	return "client"
 }
 
+func (t *handshakeTransport) printPacket(p []byte, write bool) {
+	action := "got"
+	if write {
+		action = "sent"
+	}
+
+	if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
+		log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
+	} else {
+		msg, err := decode(p)
+		log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err)
+	}
+}
+
 func (t *handshakeTransport) readPacket() ([]byte, error) {
 	p, ok := <-t.incoming
 	if !ok {
@@ -125,8 +184,10 @@ func (t *handshakeTransport) readPacket() ([]byte, error) {
 }
 
 func (t *handshakeTransport) readLoop() {
+	first := true
 	for {
-		p, err := t.readOnePacket()
+		p, err := t.readOnePacket(first)
+		first = false
 		if err != nil {
 			t.readError = err
 			close(t.incoming)
@@ -138,67 +199,204 @@ func (t *handshakeTransport) readLoop() {
 		t.incoming <- p
 	}
 
-	// If we can't read, declare the writing part dead too.
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	if t.writeError == nil {
-		t.writeError = t.readError
-	}
-	t.cond.Broadcast()
-}
+	// Stop writers too.
+	t.recordWriteError(t.readError)
+
+	// Unblock the writer should it wait for this.
+	close(t.startKex)
+
+	// Don't close t.requestKex; it's also written to from writePacket.
+}
+
+func (t *handshakeTransport) pushPacket(p []byte) error {
+	if debugHandshake {
+		t.printPacket(p, true)
+	}
+	return t.conn.writePacket(p)
+}
+
+func (t *handshakeTransport) getWriteError() error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.writeError
+}
+
+func (t *handshakeTransport) recordWriteError(err error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.writeError == nil && err != nil {
+		t.writeError = err
+	}
+}
+
+func (t *handshakeTransport) requestKeyExchange() {
+	select {
+	case t.requestKex <- struct{}{}:
+	default:
+		// something already requested a kex, so do nothing.
+	}
+}
+
+func (t *handshakeTransport) kexLoop() {
+
+write:
+	for t.getWriteError() == nil {
+		var request *pendingKex
+		var sent bool
+
+		for request == nil || !sent {
+			var ok bool
+			select {
+			case request, ok = <-t.startKex:
+				if !ok {
+					break write
+				}
+			case <-t.requestKex:
+				break
+			}
+
+			if !sent {
+				if err := t.sendKexInit(); err != nil {
+					t.recordWriteError(err)
+					break
+				}
+				sent = true
+			}
+		}
+
+		if err := t.getWriteError(); err != nil {
+			if request != nil {
+				request.done <- err
+			}
+			break
+		}
+
+		// We're not servicing t.requestKex, but that is OK:
+		// we never block on sending to t.requestKex.
+
+		// We're not servicing t.startKex, but the remote end
+		// has just sent us a kexInitMsg, so it can't send
+		// another key change request, until we close the done
+		// channel on the pendingKex request.
+
+		err := t.enterKeyExchange(request.otherInit)
+
+		t.mu.Lock()
+		t.writeError = err
+		t.sentInitPacket = nil
+		t.sentInitMsg = nil
+		t.writePacketsLeft = packetRekeyThreshold
+		if t.config.RekeyThreshold > 0 {
+			t.writeBytesLeft = int64(t.config.RekeyThreshold)
+		} else if t.algorithms != nil {
+			t.writeBytesLeft = t.algorithms.w.rekeyBytes()
+		}
+
+		// we have completed the key exchange. Since the
+		// reader is still blocked, it is safe to clear out
+		// the requestKex channel. This avoids the situation
+		// where: 1) we consumed our own request for the
+		// initial kex, and 2) the kex from the remote side
+		// caused another send on the requestKex channel,
+	clear:
+		for {
+			select {
+			case <-t.requestKex:
+				//
+			default:
+				break clear
+			}
+		}
+
+		request.done <- t.writeError
+
+		// kex finished. Push packets that we received while
+		// the kex was in progress. Don't look at t.startKex
+		// and don't increment writtenSinceKex: if we trigger
+		// another kex while we are still busy with the last
+		// one, things will become very confusing.
+		for _, p := range t.pendingPackets {
+			t.writeError = t.pushPacket(p)
+			if t.writeError != nil {
+				break
+			}
+		}
+		t.pendingPackets = t.pendingPackets[:0]
+		t.mu.Unlock()
+	}
+
+	// drain startKex channel. We don't service t.requestKex
+	// because nobody does blocking sends there.
+	go func() {
+		for init := range t.startKex {
+			init.done <- t.writeError
+		}
+	}()
+
+	// Unblock reader.
+	t.conn.Close()
+}
+
+// The protocol uses uint32 for packet counters, so we can't let them
+// reach 1<<32. We will actually read and write more packets than
+// this, though: the other side may send more packets, and after we
+// hit this limit on writing we will send a few more packets for the
+// key exchange itself.
+const packetRekeyThreshold = (1 << 31)
 
-func (t *handshakeTransport) readOnePacket() ([]byte, error) {
+func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
 	p, err := t.conn.readPacket()
 	if err != nil {
 		return nil, err
 	}
 
-	t.readSinceKex += uint64(len(p))
-	if debugHandshake {
-		if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
-			log.Printf("%s got data (packet %d bytes)", t.id(), len(p))
-		} else {
-			msg, err := decode(p)
-			log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
-		}
+	if t.readPacketsLeft > 0 {
+		t.readPacketsLeft--
+	} else {
+		t.requestKeyExchange()
 	}
 
+	if t.readBytesLeft > 0 {
+		t.readBytesLeft -= int64(len(p))
+	} else {
+		t.requestKeyExchange()
+	}
+
+	if debugHandshake {
+		t.printPacket(p, false)
+	}
+
+	if first && p[0] != msgKexInit {
+		return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
+	}
+
 	if p[0] != msgKexInit {
 		return p, nil
 	}
 
-	t.mu.Lock()
 	firstKex := t.sessionID == nil
 
-	err = t.enterKeyExchangeLocked(p)
-	if err != nil {
-		// drop connection
-		t.conn.Close()
-		t.writeError = err
-	}
+	kex := pendingKex{
+		done:      make(chan error, 1),
+		otherInit: p,
+	}
+	t.startKex <- &kex
+	err = <-kex.done
 
 	if debugHandshake {
 		log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
 	}
 
-	// Unblock writers.
-	t.sentInitMsg = nil
-	t.sentInitPacket = nil
-	t.cond.Broadcast()
-	t.writtenSinceKex = 0
-	t.mu.Unlock()
-
 	if err != nil {
 		return nil, err
 	}
 
-	t.readSinceKex = 0
+	t.readPacketsLeft = packetRekeyThreshold
+	if t.config.RekeyThreshold > 0 {
+		t.readBytesLeft = int64(t.config.RekeyThreshold)
+	} else {
+		t.readBytesLeft = t.algorithms.r.rekeyBytes()
+	}
 
 	// By default, a key exchange is hidden from higher layers by
 	// translating it into msgIgnore.
|
// translating it into msgIgnore.
|
||||||
|
@ -213,61 +411,16 @@ func (t *handshakeTransport) readOnePacket() ([]byte, error) {
|
||||||
return successPacket, nil
|
return successPacket, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// keyChangeCategory describes whether a key exchange is the first on a
|
// sendKexInit sends a key change message.
|
||||||
// connection, or a subsequent one.
|
func (t *handshakeTransport) sendKexInit() error {
|
||||||
type keyChangeCategory bool
|
|
||||||
|
|
||||||
const (
|
|
||||||
firstKeyExchange keyChangeCategory = true
|
|
||||||
subsequentKeyExchange keyChangeCategory = false
|
|
||||||
)
|
|
||||||
|
|
||||||
// sendKexInit sends a key change message, and returns the message
|
|
||||||
// that was sent. After initiating the key change, all writes will be
|
|
||||||
// blocked until the change is done, and a failed key change will
|
|
||||||
// close the underlying transport. This function is safe for
|
|
||||||
// concurrent use by multiple goroutines.
|
|
||||||
func (t *handshakeTransport) sendKexInit(isFirst keyChangeCategory) error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
t.mu.Lock()
|
t.mu.Lock()
|
||||||
// If this is the initial key change, but we already have a sessionID,
|
defer t.mu.Unlock()
|
||||||
// then do nothing because the key exchange has already completed
|
if t.sentInitMsg != nil {
|
||||||
// asynchronously.
|
|
||||||
if !isFirst || t.sessionID == nil {
|
|
||||||
_, _, err = t.sendKexInitLocked(isFirst)
|
|
||||||
}
|
|
||||||
t.mu.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if isFirst {
|
|
||||||
if packet, err := t.readPacket(); err != nil {
|
|
||||||
return err
|
|
||||||
} else if packet[0] != msgNewKeys {
|
|
||||||
return unexpectedMessageError(msgNewKeys, packet[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *handshakeTransport) requestInitialKeyChange() error {
|
|
||||||
return t.sendKexInit(firstKeyExchange)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *handshakeTransport) requestKeyChange() error {
|
|
||||||
return t.sendKexInit(subsequentKeyExchange)
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendKexInitLocked sends a key change message. t.mu must be locked
|
|
||||||
// while this happens.
|
|
||||||
func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexInitMsg, []byte, error) {
|
|
||||||
// kexInits may be sent either in response to the other side,
|
// kexInits may be sent either in response to the other side,
|
||||||
// or because our side wants to initiate a key change, so we
|
// or because our side wants to initiate a key change, so we
|
||||||
// may have already sent a kexInit. In that case, don't send a
|
// may have already sent a kexInit. In that case, don't send a
|
||||||
// second kexInit.
|
// second kexInit.
|
||||||
if t.sentInitMsg != nil {
|
return nil
|
||||||
return t.sentInitMsg, t.sentInitPacket, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := &kexInitMsg{
|
msg := &kexInitMsg{
|
||||||
|
@ -295,53 +448,65 @@ func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexI
|
||||||
packetCopy := make([]byte, len(packet))
|
packetCopy := make([]byte, len(packet))
|
||||||
copy(packetCopy, packet)
|
copy(packetCopy, packet)
|
||||||
|
|
||||||
if err := t.conn.writePacket(packetCopy); err != nil {
|
if err := t.pushPacket(packetCopy); err != nil {
|
||||||
return nil, nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
t.sentInitMsg = msg
|
t.sentInitMsg = msg
|
||||||
t.sentInitPacket = packet
|
t.sentInitPacket = packet
|
||||||
return msg, packet, nil
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *handshakeTransport) writePacket(p []byte) error {
|
func (t *handshakeTransport) writePacket(p []byte) error {
|
||||||
t.mu.Lock()
|
|
||||||
defer t.mu.Unlock()
|
|
||||||
|
|
||||||
if t.writtenSinceKex > t.config.RekeyThreshold {
|
|
||||||
t.sendKexInitLocked(subsequentKeyExchange)
|
|
||||||
}
|
|
||||||
for t.sentInitMsg != nil && t.writeError == nil {
|
|
||||||
t.cond.Wait()
|
|
||||||
}
|
|
||||||
if t.writeError != nil {
|
|
||||||
return t.writeError
|
|
||||||
}
|
|
||||||
t.writtenSinceKex += uint64(len(p))
|
|
||||||
|
|
||||||
switch p[0] {
|
switch p[0] {
|
||||||
case msgKexInit:
|
case msgKexInit:
|
||||||
return errors.New("ssh: only handshakeTransport can send kexInit")
|
return errors.New("ssh: only handshakeTransport can send kexInit")
|
||||||
case msgNewKeys:
|
case msgNewKeys:
|
||||||
return errors.New("ssh: only handshakeTransport can send newKeys")
|
return errors.New("ssh: only handshakeTransport can send newKeys")
|
||||||
default:
|
|
||||||
return t.conn.writePacket(p)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t.mu.Lock()
|
||||||
|
defer t.mu.Unlock()
|
||||||
|
if t.writeError != nil {
|
||||||
|
return t.writeError
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.sentInitMsg != nil {
|
||||||
|
// Copy the packet so the writer can reuse the buffer.
|
||||||
|
cp := make([]byte, len(p))
|
||||||
|
copy(cp, p)
|
||||||
|
t.pendingPackets = append(t.pendingPackets, cp)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.writeBytesLeft > 0 {
|
||||||
|
t.writeBytesLeft -= int64(len(p))
|
||||||
|
} else {
|
||||||
|
t.requestKeyExchange()
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.writePacketsLeft > 0 {
|
||||||
|
t.writePacketsLeft--
|
||||||
|
} else {
|
||||||
|
t.requestKeyExchange()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.pushPacket(p); err != nil {
|
||||||
|
t.writeError = err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *handshakeTransport) Close() error {
|
func (t *handshakeTransport) Close() error {
|
||||||
return t.conn.Close()
|
return t.conn.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// enterKeyExchange runs the key exchange. t.mu must be held while running this.
|
func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
|
||||||
func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) error {
|
|
||||||
if debugHandshake {
|
if debugHandshake {
|
||||||
log.Printf("%s entered key exchange", t.id())
|
log.Printf("%s entered key exchange", t.id())
|
||||||
}
|
}
|
||||||
myInit, myInitPacket, err := t.sendKexInitLocked(subsequentKeyExchange)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
otherInit := &kexInitMsg{}
|
otherInit := &kexInitMsg{}
|
||||||
if err := Unmarshal(otherInitPacket, otherInit); err != nil {
|
if err := Unmarshal(otherInitPacket, otherInit); err != nil {
|
||||||
|
@ -352,20 +517,20 @@ func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) erro
|
||||||
clientVersion: t.clientVersion,
|
clientVersion: t.clientVersion,
|
||||||
serverVersion: t.serverVersion,
|
serverVersion: t.serverVersion,
|
||||||
clientKexInit: otherInitPacket,
|
clientKexInit: otherInitPacket,
|
||||||
serverKexInit: myInitPacket,
|
serverKexInit: t.sentInitPacket,
|
||||||
}
|
}
|
||||||
|
|
||||||
clientInit := otherInit
|
clientInit := otherInit
|
||||||
serverInit := myInit
|
serverInit := t.sentInitMsg
|
||||||
if len(t.hostKeys) == 0 {
|
if len(t.hostKeys) == 0 {
|
||||||
clientInit = myInit
|
clientInit, serverInit = serverInit, clientInit
|
||||||
serverInit = otherInit
|
|
||||||
|
|
||||||
magics.clientKexInit = myInitPacket
|
magics.clientKexInit = t.sentInitPacket
|
||||||
magics.serverKexInit = otherInitPacket
|
magics.serverKexInit = otherInitPacket
|
||||||
}
|
}
|
||||||
|
|
||||||
algs, err := findAgreedAlgorithms(clientInit, serverInit)
|
var err error
|
||||||
|
t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -388,16 +553,16 @@ func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) erro
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
kex, ok := kexAlgoMap[algs.kex]
|
kex, ok := kexAlgoMap[t.algorithms.kex]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
|
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
|
||||||
}
|
}
|
||||||
|
|
||||||
var result *kexResult
|
var result *kexResult
|
||||||
if len(t.hostKeys) > 0 {
|
if len(t.hostKeys) > 0 {
|
||||||
result, err = t.server(kex, algs, &magics)
|
result, err = t.server(kex, t.algorithms, &magics)
|
||||||
} else {
|
} else {
|
||||||
result, err = t.client(kex, algs, &magics)
|
result, err = t.client(kex, t.algorithms, &magics)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -409,7 +574,7 @@ func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) erro
|
||||||
}
|
}
|
||||||
result.SessionID = t.sessionID
|
result.SessionID = t.sessionID
|
||||||
|
|
||||||
t.conn.prepareKeyChange(algs, result)
|
t.conn.prepareKeyChange(t.algorithms, result)
|
||||||
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
|
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
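The rewritten handshake code above moves all rekeying into a single kexLoop goroutine: readers and writers never perform a key exchange themselves, they only signal one via requestKeyExchange, which does a non-blocking send on t.requestKex so that many triggers collapse into at most one pending exchange. The self-contained Go sketch below illustrates that signalling pattern on its own; the worker type, the buffer size of 1, and the timings are illustrative assumptions and are not part of the diff.

// A minimal, standalone sketch of the non-blocking "request" pattern used
// by requestKeyExchange above: a buffered channel plus select/default
// collapses bursts of requests into a single pending signal for the loop.
package main

import (
	"fmt"
	"time"
)

type worker struct {
	requestKex chan struct{} // small buffer, so a request can be queued without blocking
}

// request never blocks; if a request is already pending it does nothing.
func (w *worker) request() {
	select {
	case w.requestKex <- struct{}{}:
	default:
		// something already requested work, so do nothing.
	}
}

func (w *worker) loop(done chan struct{}) {
	for range w.requestKex {
		fmt.Println("performing one key exchange for possibly many requests")
		time.Sleep(10 * time.Millisecond)
	}
	close(done)
}

func main() {
	w := &worker{requestKex: make(chan struct{}, 1)}
	done := make(chan struct{})
	go w.loop(done)

	for i := 0; i < 5; i++ {
		w.request() // the burst collapses into a single pending request
	}
	time.Sleep(50 * time.Millisecond)
	close(w.requestKex)
	<-done
}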
@@ -10,10 +10,13 @@ import (
 	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/elliptic"
+	"crypto/md5"
 	"crypto/rsa"
+	"crypto/sha256"
 	"crypto/x509"
 	"encoding/asn1"
 	"encoding/base64"
+	"encoding/hex"
 	"encoding/pem"
 	"errors"
 	"fmt"
@@ -795,8 +798,8 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
 		P    *big.Int
 		Q    *big.Int
 		G    *big.Int
-		Priv *big.Int
 		Pub  *big.Int
+		Priv *big.Int
 	}
 	rest, err := asn1.Unmarshal(der, &k)
 	if err != nil {
@@ -813,9 +816,9 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
 				Q: k.Q,
 				G: k.G,
 			},
-			Y: k.Priv,
+			Y: k.Pub,
 		},
-		X: k.Pub,
+		X: k.Priv,
 	}, nil
 }

@@ -878,3 +881,25 @@ func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
 	copy(pk, pk1.Priv)
 	return &pk, nil
 }
+
+// FingerprintLegacyMD5 returns the user presentation of the key's
+// fingerprint as described by RFC 4716 section 4.
+func FingerprintLegacyMD5(pubKey PublicKey) string {
+	md5sum := md5.Sum(pubKey.Marshal())
+	hexarray := make([]string, len(md5sum))
+	for i, c := range md5sum {
+		hexarray[i] = hex.EncodeToString([]byte{c})
+	}
+	return strings.Join(hexarray, ":")
+}
+
+// FingerprintSHA256 returns the user presentation of the key's
+// fingerprint as unpadded base64 encoded sha256 hash.
+// This format was introduced from OpenSSH 6.8.
+// https://www.openssh.com/txt/release-6.8
+// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
+func FingerprintSHA256(pubKey PublicKey) string {
+	sha256sum := sha256.Sum256(pubKey.Marshal())
+	hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
+	return "SHA256:" + hash
+}
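The new FingerprintLegacyMD5 and FingerprintSHA256 helpers shown above format a public key's fingerprint from the marshalled key blob. The standalone sketch below reproduces the two presentations from an arbitrary byte slice; it is illustrative only, and in real code you would call the library functions on a PublicKey rather than on a raw blob.

// A standalone sketch of the two fingerprint formats added above, computed
// from a raw key blob (the bytes PublicKey.Marshal would return).
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"strings"
)

func fingerprintSHA256(blob []byte) string {
	sum := sha256.Sum256(blob)
	// Unpadded base64, matching the OpenSSH 6.8+ "SHA256:..." presentation.
	return "SHA256:" + base64.RawStdEncoding.EncodeToString(sum[:])
}

func fingerprintMD5(blob []byte) string {
	sum := md5.Sum(blob)
	parts := make([]string, len(sum))
	for i, c := range sum {
		parts[i] = hex.EncodeToString([]byte{c})
	}
	return strings.Join(parts, ":")
}

func main() {
	blob := []byte("example public key blob") // placeholder, not a real key
	fmt.Println(fingerprintSHA256(blob))
	fmt.Println(fingerprintMD5(blob))
}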
@@ -15,6 +15,7 @@ import (
 type macMode struct {
 	keySize int
+	etm     bool
 	new     func(key []byte) hash.Hash
 }

@@ -45,13 +46,16 @@ func (t truncatingMAC) Size() int {
 func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }

 var macModes = map[string]*macMode{
-	"hmac-sha2-256": {32, func(key []byte) hash.Hash {
+	"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
 		return hmac.New(sha256.New, key)
 	}},
-	"hmac-sha1": {20, func(key []byte) hash.Hash {
+	"hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
+		return hmac.New(sha256.New, key)
+	}},
+	"hmac-sha1": {20, false, func(key []byte) hash.Hash {
 		return hmac.New(sha1.New, key)
 	}},
-	"hmac-sha1-96": {20, func(key []byte) hash.Hash {
+	"hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
 		return truncatingMAC{12, hmac.New(sha1.New, key)}
 	}},
 }
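The macModes table now records, for each MAC algorithm name, its key size, whether it is an encrypt-then-MAC ("-etm@openssh.com") variant, and a constructor for the keyed hash. A minimal standalone sketch of that shape follows; the all-zero key and the sample message are placeholders, and the etm flag here only indicates what a transport would be expected to MAC (ciphertext rather than plaintext), which the sketch does not implement.

// A minimal sketch (not the library's internal API) of the table shape used
// above: algorithm name -> key size, encrypt-then-MAC flag, MAC constructor.
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"crypto/sha256"
	"fmt"
	"hash"
)

type macMode struct {
	keySize int
	etm     bool
	new     func(key []byte) hash.Hash
}

var macModes = map[string]*macMode{
	"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) }},
	"hmac-sha2-256":                 {32, false, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) }},
	"hmac-sha1":                     {20, false, func(key []byte) hash.Hash { return hmac.New(sha1.New, key) }},
}

func main() {
	mode := macModes["hmac-sha2-256-etm@openssh.com"]
	key := make([]byte, mode.keySize) // all-zero key, for illustration only
	mac := mode.new(key)
	mac.Write([]byte("ciphertext or plaintext, depending on mode.etm"))
	fmt.Printf("etm=%v tag=%x\n", mode.etm, mac.Sum(nil))
}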
@@ -116,9 +116,9 @@ func (m *mux) Wait() error {
 func newMux(p packetConn) *mux {
 	m := &mux{
 		conn:             p,
-		incomingChannels: make(chan NewChannel, 16),
+		incomingChannels: make(chan NewChannel, chanSize),
 		globalResponses:  make(chan interface{}, 1),
-		incomingRequests: make(chan *Request, 16),
+		incomingRequests: make(chan *Request, chanSize),
 		errCond:          newCond(),
 	}
 	if debugMux {
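The mux hunk above replaces the hard-coded channel buffers of 16 with a shared chanSize constant for both incoming channels and requests. The toy example below only illustrates the effect of such a buffer: the producing loop can queue up to the buffer size before anyone reads. The value 16 is assumed here for illustration; the constant itself is defined elsewhere in the package.

// A small, self-contained illustration (not mux.go itself) of a buffered
// incoming queue: the producer never blocks until chanSize items are queued.
package main

import "fmt"

func main() {
	const chanSize = 16 // stand-in for the shared constant referenced above
	incoming := make(chan string, chanSize)

	for i := 0; i < chanSize; i++ {
		incoming <- fmt.Sprintf("channel-open-%d", i)
	}
	close(incoming)

	// A slower consumer drains the backlog afterwards.
	for msg := range incoming {
		fmt.Println("handled", msg)
	}
}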
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"strings"
 )

 // The Permissions type holds fine-grained permissions that are
@@ -188,7 +189,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
 	tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
 	s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)

-	if err := s.transport.requestInitialKeyChange(); err != nil {
+	if err := s.transport.waitSession(); err != nil {
 		return nil, err
 	}

@@ -231,7 +232,7 @@ func isAcceptableAlgo(algo string) bool {
 	return false
 }

-func checkSourceAddress(addr net.Addr, sourceAddr string) error {
+func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
 	if addr == nil {
 		return errors.New("ssh: no address known for client, but source-address match required")
 	}
@@ -241,8 +242,9 @@ func checkSourceAddress(addr net.Addr, sourceAddr string) error {
 		return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
 	}

+	for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
 		if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
-			if bytes.Equal(allowedIP, tcpAddr.IP) {
+			if allowedIP.Equal(tcpAddr.IP) {
 				return nil
 			}
 		} else {
@@ -255,12 +257,13 @@ func checkSourceAddress(addr net.Addr, sourceAddr string) error {
 				return nil
 			}
 		}
+	}

 	return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
 }

 func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
-	var err error
+	sessionID := s.transport.getSessionID()
 	var cache pubKeyCache
 	var perms *Permissions

@@ -385,7 +388,7 @@ userAuthLoop:
 			if !isAcceptableAlgo(sig.Format) {
 				break
 			}
-			signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData)
+			signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)

 			if err := pubKey.Verify(signedData, sig); err != nil {
 				return nil, err
@@ -421,12 +424,12 @@ userAuthLoop:
 			return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
 		}

-		if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil {
+		if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
 			return nil, err
 		}
 	}

-	if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
+	if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
 		return nil, err
 	}
 	return perms, nil
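checkSourceAddress now accepts a comma-separated list of allowed source addresses and iterates over it, comparing IPs with net.IP.Equal. The sketch below demonstrates that matching in isolation; the CIDR branch and the TrimSpace call are assumptions for illustration (the else branch of the real function is elided in the hunk above), and the name sourceAddressMatches is not part of the library.

// A standalone sketch of comma-separated source-address matching, assuming
// entries are either literal IPs or CIDR ranges.
package main

import (
	"fmt"
	"net"
	"strings"
)

func sourceAddressMatches(ip net.IP, sourceAddrs string) bool {
	for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
		sourceAddr = strings.TrimSpace(sourceAddr)
		if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
			if allowedIP.Equal(ip) {
				return true
			}
		} else if _, ipNet, err := net.ParseCIDR(sourceAddr); err == nil {
			if ipNet.Contains(ip) {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(sourceAddressMatches(net.ParseIP("10.0.0.5"), "192.168.1.1,10.0.0.0/8"))   // true
	fmt.Println(sourceAddressMatches(net.ParseIP("172.16.0.1"), "192.168.1.1,10.0.0.0/8")) // false
}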
@@ -8,8 +8,13 @@ import (
 	"bufio"
 	"errors"
 	"io"
+	"log"
 )

+// debugTransport if set, will print packet types as they go over the
+// wire. No message decoding is done, to minimize the impact on timing.
+const debugTransport = false
+
 const (
 	gcmCipherID = "aes128-gcm@openssh.com"
 	aes128cbcID = "aes128-cbc"
@@ -22,7 +27,9 @@ type packetConn interface {
 	// Encrypt and send a packet of data to the remote peer.
 	writePacket(packet []byte) error

-	// Read a packet from the connection
+	// Read a packet from the connection. The read is blocking,
+	// i.e. if error is nil, then the returned byte slice is
+	// always non-empty.
 	readPacket() ([]byte, error)

 	// Close closes the write-side of the connection.
@@ -38,7 +45,7 @@ type transport struct {
 	bufReader *bufio.Reader
 	bufWriter *bufio.Writer
 	rand      io.Reader
+	isClient  bool
 	io.Closer
 }

@@ -84,9 +91,38 @@ func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) err
 	return nil
 }

+func (t *transport) printPacket(p []byte, write bool) {
+	if len(p) == 0 {
+		return
+	}
+	who := "server"
+	if t.isClient {
+		who = "client"
+	}
+	what := "read"
+	if write {
+		what = "write"
+	}
+
+	log.Println(what, who, p[0])
+}
+
 // Read and decrypt next packet.
-func (t *transport) readPacket() ([]byte, error) {
-	return t.reader.readPacket(t.bufReader)
+func (t *transport) readPacket() (p []byte, err error) {
+	for {
+		p, err = t.reader.readPacket(t.bufReader)
+		if err != nil {
+			break
+		}
+		if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
+			break
+		}
+	}
+	if debugTransport {
+		t.printPacket(p, false)
+	}
+
+	return p, err
 }

 func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
@@ -129,6 +165,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
 }

 func (t *transport) writePacket(packet []byte) error {
+	if debugTransport {
+		t.printPacket(packet, true)
+	}
 	return t.writer.writePacket(t.bufWriter, t.rand, packet)
 }

@@ -169,6 +208,8 @@ func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transp
 		},
 		Closer: rwc,
 	}
+	t.isClient = isClient

 	if isClient {
 		t.reader.dir = serverKeys
 		t.writer.dir = clientKeys
@@ -226,6 +267,7 @@ func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (pac

 	c := &streamPacketCipher{
 		mac: macModes[algs.MAC].new(macKey),
+		etm: macModes[algs.MAC].etm,
 	}
 	c.macResult = make([]byte, c.mac.Size())
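transport.readPacket now loops until it sees a packet that is neither msgIgnore nor msgDebug, so those messages never reach higher layers. A small self-contained sketch of that filtering loop follows; the numeric message values and the in-memory queue are stand-ins for the real packet reader and SSH constants.

// A minimal sketch of the filtering loop used by readPacket above:
// keep reading until we get a packet that is not an ignore/debug message.
package main

import "fmt"

const (
	msgIgnore = 2 // stand-in value for illustration
	msgDebug  = 4 // stand-in value for illustration
)

// readFiltered drains ignore/debug packets and returns the first "real" one.
func readFiltered(read func() ([]byte, error)) ([]byte, error) {
	for {
		p, err := read()
		if err != nil {
			return nil, err
		}
		if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
			return p, nil
		}
	}
}

func main() {
	queue := [][]byte{{msgIgnore}, {msgDebug, 'x'}, {20, 1, 2, 3}} // 20 is a made-up payload type
	read := func() ([]byte, error) {
		p := queue[0]
		queue = queue[1:]
		return p, nil
	}
	p, err := readFiltered(read)
	fmt.Println(p, err) // [20 1 2 3] <nil>
}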
@@ -575,10 +575,10 @@
 			"revision": "95ba30457eb1121fa27753627c774c7cd4e90083"
 		},
 		{
-			"checksumSHA1": "24M48du3803/HG1uZTo5aQxmPNk=",
+			"checksumSHA1": "6uNzRk3ScsTC+olpCXkW4M/L3fg=",
 			"path": "github.com/masterzen/winrm",
-			"revision": "a6cd420bebdc21d10c97296556295fcb99dd9635",
-			"revisionTime": "2017-01-16T07:57:11Z"
+			"revision": "b7e3d2de4979ce5eae5c1c1ef450040c5d675a89",
+			"revisionTime": "2017-01-26T07:02:57Z"
 		},
 		{
 			"checksumSHA1": "KTsgWipT3ennAAtaKxEZairxero=",
@@ -751,6 +751,12 @@
 			"revision": "7d6a4449b586546246087e96e5c97dbc450f4917",
 			"revisionTime": "2016-09-28T15:38:44Z"
 		},
+		{
+			"checksumSHA1": "iHiMTBffQvWYlOLu3130JXuQpgQ=",
+			"path": "github.com/xanzy/ssh-agent",
+			"revision": "ba9c9e33906f58169366275e3450db66139a31a9",
+			"revisionTime": "2015-12-15T15:34:51Z"
+		},
 		{
 			"checksumSHA1": "h+pFYiRHBogczS8/F1NoN3Ata44=",
 			"path": "golang.org/x/crypto/curve25519",
@@ -774,16 +780,16 @@
 			"revision": "1f22c0103821b9390939b6776727195525381532"
 		},
 		{
-			"checksumSHA1": "LlElMHeTC34ng8eHzjvtUhAgrr8=",
+			"checksumSHA1": "fsrFs762jlaILyqqQImS1GfvIvw=",
 			"path": "golang.org/x/crypto/ssh",
-			"revision": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd",
-			"revisionTime": "2016-10-31T15:37:30Z"
+			"revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
+			"revisionTime": "2017-02-08T20:51:15Z"
 		},
 		{
 			"checksumSHA1": "SJ3Ma3Ozavxpbh1usZWBCnzMKIc=",
 			"path": "golang.org/x/crypto/ssh/agent",
-			"revision": "7682e7e3945130cf3cde089834664f68afdd1523",
-			"revisionTime": "2016-10-03T20:54:26Z"
+			"revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
+			"revisionTime": "2017-02-08T20:51:15Z"
 		},
 		{
 			"checksumSHA1": "5ARrN3Zq+E9zazFb/N+b08Serys=",
@@ -4,9 +4,9 @@ init:
 	bundle

 docker-dev:
-	docker run -it --expose 4567 -P -v "$PWD":/usr/src/app -w /usr/src/app ruby:2.3.1 \
+	docker run -it --expose 4567 -p 4567:4567 -v "$(PWD)":/usr/src/app -w /usr/src/app ruby:2.3.1 \
 	bash -c "apt-get update && apt-get -qy install curl git libgmp3-dev nodejs && \
-	gem install bundler && make dev"
+	gem install bundler && bundle install && make dev"

 dev: init
 	PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman server
Some files were not shown because too many files have changed in this diff.