rename retry so it doesn't stutter

Megan Marsh 2020-11-16 11:49:33 -08:00
parent f52a2ad0fa
commit 94a660147e
6 changed files with 10 additions and 10 deletions

@@ -15,7 +15,7 @@ var RetryExhaustedError error = fmt.Errorf("Function never succeeded in Retry")
 type RetryableFunc func(uint) (bool, error)
 /*
-Retry retries a function up to numTries times with exponential backoff.
+Run retries a function up to numTries times with exponential backoff.
 If numTries == 0, retry indefinitely.
 If interval == 0, Retry will not delay retrying and there will be no
 exponential backoff.
@@ -24,7 +24,7 @@ Intervals are in seconds.
 Returns an error if initial > max intervals, if retries are exhausted, or if the passed function returns
 an error.
 */
-func Retry(initialInterval float64, maxInterval float64, numTries uint, function RetryableFunc) error {
+func Run(initialInterval float64, maxInterval float64, numTries uint, function RetryableFunc) error {
 	if maxInterval == 0 {
 		maxInterval = math.Inf(1)
 	} else if initialInterval < 0 || initialInterval > maxInterval {
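For reference, here is a rough sketch of a call site for the renamed function, based only on the signature and doc comment in the hunk above. The health-check scenario, the waitForHealthy helper, and its url parameter are invented for illustration, and the imports of this repository's retry package plus log and net/http are assumed rather than shown.

// waitForHealthy polls url until it answers 200 OK, making up to 5 attempts
// with exponential backoff that starts at 1 second and is capped at 10 seconds.
func waitForHealthy(url string) error {
	return retry.Run(1, 10, 5, func(attempt uint) (bool, error) {
		log.Printf("health check attempt %d", attempt+1)
		resp, err := http.Get(url)
		if err != nil {
			return false, nil // not reachable yet; let Run back off and retry
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil // true ends the retry loop
	})
}

Per the doc comment, returning a non-nil error from the callback stops the retries and surfaces that error, while running out of attempts presumably surfaces the RetryExhaustedError declared at the top of the file, so a caller can tell the two cases apart by comparing against retry.RetryExhaustedError.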

@@ -8,7 +8,7 @@ import (
 func TestRetry(t *testing.T) {
 	numTries := uint(0)
 	// Test that a passing function only gets called once.
-	err := Retry(0, 0, 0, func(i uint) (bool, error) {
+	err := Run(0, 0, 0, func(i uint) (bool, error) {
 		numTries++
 		return true, nil
 	})
@@ -22,7 +22,7 @@ func TestRetry(t *testing.T) {
 	// Test that a failing function gets retried (once in this example).
 	numTries = 0
 	results := []bool{false, true}
-	err = Retry(0, 0, 0, func(i uint) (bool, error) {
+	err = Run(0, 0, 0, func(i uint) (bool, error) {
 		result := results[numTries]
 		numTries++
 		return result, nil
@@ -37,7 +37,7 @@ func TestRetry(t *testing.T) {
 	// Test that a function error gets returned, and the function does not get called again.
 	numTries = 0
 	funcErr := fmt.Errorf("This function had an error!")
-	err = Retry(0, 0, 0, func(i uint) (bool, error) {
+	err = Run(0, 0, 0, func(i uint) (bool, error) {
 		numTries++
 		return false, funcErr
 	})
@@ -51,7 +51,7 @@ func TestRetry(t *testing.T) {
 	// Test when a function exhausts its retries.
 	numTries = 0
 	expectedTries := uint(3)
-	err = Retry(0, 0, expectedTries, func(i uint) (bool, error) {
+	err = Run(0, 0, expectedTries, func(i uint) (bool, error) {
 		numTries++
 		return false, nil
 	})

@@ -68,7 +68,7 @@ func WaitUntilOscSnapshotDone(conn *osc.APIClient, snapshotID string) error {
 }
 
 func waitForState(errCh chan<- error, target string, refresh stateRefreshFunc) {
-	err := retry.Retry(2, 2, 0, func(_ uint) (bool, error) {
+	err := retry.Run(2, 2, 0, func(_ uint) (bool, error) {
 		state, err := refresh()
 		if err != nil {
 			return false, err

@@ -90,7 +90,7 @@ func (s *StepCreateTags) Run(_ context.Context, state multistep.StateBag) multis
 		snapshotTags.Report(ui)
 		// Retry creating tags for about 2.5 minutes
-		err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
+		err = retry.Run(0.2, 30, 11, func(_ uint) (bool, error) {
 			// Tag images and snapshots
 			_, _, err := regionconn.TagApi.CreateTags(context.Background(), &osc.CreateTagsOpts{
 				CreateTagsRequest: optional.NewInterface(osc.CreateTagsRequest{
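A quick sanity check on the "about 2.5 minutes" comment, assuming Run doubles the wait after each attempt and caps it at maxInterval (the growth factor itself is not visible in this diff): with (0.2, 30, 11) the waits run 0.2 + 0.4 + 0.8 + 1.6 + 3.2 + 6.4 + 12.8 + 25.6 ≈ 51 s and then sit at the 30 s cap, so eleven attempts span roughly 110-140 seconds, which is where the comment's estimate comes from.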

@@ -234,7 +234,7 @@ func (s *StepRunSourceVm) Run(ctx context.Context, state multistep.StateBag) mul
 	if s.IsRestricted {
 		oscTags.Report(ui)
 		// Retry creating tags for about 2.5 minutes
-		err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
+		err = retry.Run(0.2, 30, 11, func(_ uint) (bool, error) {
 			_, _, err := oscconn.TagApi.CreateTags(context.Background(), &osc.CreateTagsOpts{
 				CreateTagsRequest: optional.NewInterface(osc.CreateTagsRequest{
 					Tags: oscTags,

@@ -41,7 +41,7 @@ func (s *StepStopBSUBackedVm) Run(ctx context.Context, state multistep.StateBag)
 	// does not exist.
 	// Work around this by retrying a few times, up to about 5 minutes.
-	err := retry.Retry(10, 60, 6, func(i uint) (bool, error) {
+	err := retry.Run(10, 60, 6, func(i uint) (bool, error) {
 		ui.Message(fmt.Sprintf("Stopping vm, attempt %d", i+1))
 		_, _, err = oscconn.VmApi.StopVms(context.Background(), &osc.StopVmsOpts{
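The same back-of-the-envelope estimate applies here, under the same doubling assumption: (10, 60, 6) gives waits of 10 + 20 + 40 s and then the 60 s cap for the remaining attempts, roughly three to four minutes in total, consistent with the "up to about 5 minutes" in the comment.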