Fixing transient AWS errors during EBS builds
Relates to #1539. AWS is eventually consistent, and an instance may not be visible for some time after creation. This fix eliminates the describe-instances call made before entering the proper wait loop.
parent fc6b78b8dd
commit d046e1cc5b
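For illustration, the pattern this commit moves to can be sketched as a small standalone Go program: the refresh function is built from an instance ID alone, and a "not found" response from the eventually consistent API is treated as a transient condition that the wait loop simply polls through. Everything below (instance, lookup, instanceStateRefresh, waitForState, errNotFound) is an illustrative stand-in, not Packer's or goamz's actual API.

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// errNotFound stands in for EC2's "InvalidInstanceID.NotFound" error, which a
	// freshly launched instance can return until AWS propagates its existence.
	var errNotFound = errors.New("InvalidInstanceID.NotFound")

	// instance is a stand-in for ec2.Instance with only the fields used here.
	type instance struct {
		InstanceId string
		State      string
	}

	// refreshFunc mirrors the shape of a state refresh function: it returns the
	// refreshed object, its current state, and an error.
	type refreshFunc func() (interface{}, string, error)

	// instanceStateRefresh needs only the instance ID. A "not found" error is
	// reported as an empty result so the wait loop simply polls again.
	func instanceStateRefresh(lookup func(id string) (*instance, error), id string) refreshFunc {
		return func() (interface{}, string, error) {
			i, err := lookup(id)
			if errors.Is(err, errNotFound) {
				return nil, "", nil // not visible yet; treat as transient
			}
			if err != nil {
				return nil, "", err
			}
			return i, i.State, nil
		}
	}

	// waitForState polls until the target state is reached and returns the last
	// refreshed object, so no separate describe call is needed before the loop.
	func waitForState(target string, refresh refreshFunc) (interface{}, error) {
		for attempt := 0; attempt < 40; attempt++ {
			obj, state, err := refresh()
			if err != nil {
				return nil, err
			}
			if state == target {
				return obj, nil
			}
			time.Sleep(200 * time.Millisecond) // keep the demo fast; real code waits longer
		}
		return nil, fmt.Errorf("timeout waiting for state %q", target)
	}

	func main() {
		// Fake lookup: invisible for two polls, then running.
		calls := 0
		lookup := func(id string) (*instance, error) {
			calls++
			if calls <= 2 {
				return nil, errNotFound
			}
			return &instance{InstanceId: id, State: "running"}, nil
		}

		obj, err := waitForState("running", instanceStateRefresh(lookup, "i-12345678"))
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		i := obj.(*instance)
		fmt.Printf("instance %s reached state %s after %d polls\n", i.InstanceId, i.State, calls)
	}

The point mirrored from the commit is that nothing has to describe the instance before the loop starts; the loop itself tolerates the instance being invisible and hands back the instance object once it appears.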
@@ -63,9 +63,9 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {
 
 // InstanceStateRefreshFunc returns a StateRefreshFunc that is used to watch
 // an EC2 instance.
-func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
+func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		resp, err := conn.Instances([]string{i.InstanceId}, ec2.NewFilter())
+		resp, err := conn.Instances([]string{instanceId}, ec2.NewFilter())
 		if err != nil {
 			if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
 				// Set this to nil as if we didn't find anything.
@@ -85,7 +85,7 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
 			return nil, "", nil
 		}
 
-		i = &resp.Reservations[0].Instances[0]
+		i := &resp.Reservations[0].Instances[0]
 		return i, i.State.Name, nil
 	}
 }
@@ -195,21 +195,13 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {
 		instanceId = spotResp.SpotRequestResults[0].InstanceId
 	}
 
-	instanceResp, err := ec2conn.Instances([]string{instanceId}, nil)
-	if err != nil {
-		err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-	s.instance = &instanceResp.Reservations[0].Instances[0]
-	ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId))
+	ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
 
-	ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId))
+	ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
 	stateChange := StateChangeConf{
 		Pending:   []string{"pending"},
 		Target:    "running",
-		Refresh:   InstanceStateRefreshFunc(ec2conn, s.instance),
+		Refresh:   InstanceStateRefreshFunc(ec2conn, instanceId),
 		StepState: state,
 	}
 	latestInstance, err := WaitForState(&stateChange)
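The hunk ends at the WaitForState call; presumably the step then takes the instance from the wait result rather than from the removed describe-instances call. A hedged sketch of that consumption, reusing only names that already appear in the diff (the error wording and the type assertion are assumptions, not lines quoted from the commit):

	// Sketch only (assumption): consume the wait result instead of a prior describe call.
	latestInstance, err := WaitForState(&stateChange)
	if err != nil {
		err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	// The refresh function returns the *ec2.Instance, so the last value from
	// WaitForState can stand in for the removed describe response.
	s.instance = latestInstance.(*ec2.Instance)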
@@ -285,7 +277,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
 	}
 	stateChange := StateChangeConf{
 		Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
-		Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
+		Refresh: InstanceStateRefreshFunc(ec2conn, s.instance.InstanceId),
 		Target:  "terminated",
 	}
 
@@ -87,7 +87,7 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
 		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
 		return
 	} else if resp.Return == false {
-		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return))
+		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around"))
 		return
 	}
 }
@@ -37,7 +37,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
 	stateChange := awscommon.StateChangeConf{
 		Pending:   []string{"running", "stopping"},
 		Target:    "stopped",
-		Refresh:   awscommon.InstanceStateRefreshFunc(ec2conn, instance),
+		Refresh:   awscommon.InstanceStateRefreshFunc(ec2conn, instance.InstanceId),
 		StepState: state,
 	}
 	_, err = awscommon.WaitForState(&stateChange)