added: testing

This commit is contained in:
Brett Wandel 2019-04-19 17:10:48 +10:00
parent 76b5c1995f
commit aaf56ffd26
4 changed files with 109 additions and 127 deletions

View File

@ -186,6 +186,7 @@ func (c *BuildCommand) Run(args []string) int {
if err := limitParallel.Acquire(buildCtx, 1); err != nil {
ui.Error(fmt.Sprintf("Build '%s' failed to acquire semaphore: %s", name, err))
errors[name] = err
return
}
defer limitParallel.Release(1)

View File

@ -0,0 +1,98 @@
package command
import (
"bytes"
"context"
"fmt"
"math"
"path/filepath"
"sync"
"testing"
"github.com/hashicorp/packer/packer"
)
// ParallelTestBuilder is a packer.Builder stub used to observe how many
// builds get prepared and how many actually start running. Run signals wg
// once per invocation and then blocks on m, which the test holds locked,
// so "started" builds stay in flight until the test releases them.
type ParallelTestBuilder struct {
	Prepared int // number of Prepare calls seen
	Built    int // number of Run calls that have started

	wg *sync.WaitGroup // Done()'d once per Run invocation
	m  *sync.Mutex     // held locked by the test to keep Run blocked
}
// Prepare records that one more build was prepared; it does no real work
// and never fails.
func (b *ParallelTestBuilder) Prepare(raws ...interface{}) ([]string, error) {
	b.Prepared = b.Prepared + 1
	return nil, nil
}
// Run emits the current Built count, records that this build has started,
// signals the test, and then blocks on b.m until the test unlocks it —
// simulating a long-running build so the test can count concurrency.
//
// NOTE(review): b.Built is incremented here from multiple parallel build
// goroutines and read by the test without synchronization; `go test -race`
// will likely flag this. Confirm, and guard it with its own mutex or an
// atomic if so (b.m cannot be used — it is held locked while Run executes).
func (b *ParallelTestBuilder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
	ui.Say(fmt.Sprintf("count: %d", b.Built))
	b.Built++
	b.wg.Done() // tell the test another build has started
	b.m.Lock()  // park here until the test calls m.Unlock()
	b.m.Unlock()
	return nil, nil
}
// testMetaParallel returns a Meta whose component finder resolves the
// "parallel-test" builder type to the supplied ParallelTestBuilder; any
// other builder name is a programming error and panics.
func testMetaParallel(t *testing.T, builder *ParallelTestBuilder) Meta {
	var stdout, stderr bytes.Buffer
	return Meta{
		CoreConfig: &packer.CoreConfig{
			Components: packer.ComponentFinder{
				Builder: func(n string) (packer.Builder, error) {
					if n == "parallel-test" {
						return builder, nil
					}
					panic(n)
				},
			},
		},
		Ui: &packer.BasicUi{
			Writer:      &stdout,
			ErrorWriter: &stderr,
		},
	}
}
// TestBuildParallel checks that `packer build -parallel=N` prepares every
// build in the template but lets at most N of them run concurrently.
func TestBuildParallel(t *testing.T) {
	defer cleanup()

	// m stays locked for the whole counting phase, so every build that
	// starts running blocks inside ParallelTestBuilder.Run.
	m := &sync.Mutex{}
	m.Lock()

	// Wait until exactly `expected` builds have started.
	expected := 2
	wg := &sync.WaitGroup{}
	wg.Add(expected)

	b := &ParallelTestBuilder{
		wg: wg,
		m:  m,
	}

	c := &BuildCommand{
		Meta: testMetaParallel(t, b),
	}

	args := []string{
		fmt.Sprintf("-parallel=%d", expected),
		filepath.Join(testFixture("parallel"), "template.json"),
	}

	// Run the command in the background; it cannot finish while m is held
	// because every running build is blocked on it.
	go func(t *testing.T, c *BuildCommand) {
		if code := c.Run(args); code != 0 {
			fatalCommand(t, c.Meta)
		}
	}(t, c)
	wg.Wait()

	// The fixture template declares 6 builders; all must be prepared up
	// front even though only `expected` may run at once.
	if b.Prepared != 6 {
		t.Errorf("Expected all builds to be prepared, was %d", b.Prepared)
	}

	// NOTE(review): b.Built is written by the builder goroutines and read
	// here without synchronization — confirm with `go test -race`.
	if b.Built != expected {
		t.Errorf("Expected only %d running/completed builds, was %d", expected, b.Built)
	}

	// Release the blocked builds so the command can finish.
	m.Unlock()

	// The remaining builds still call wg.Done after the counter already
	// reached zero; inflating the counter keeps those calls from panicking
	// with a negative WaitGroup count.
	wg.Add(math.MaxInt32)
}

View File

@ -0,0 +1,10 @@
{
"builders": [
{"type": "parallel-test", "name": "build0"},
{"type": "parallel-test", "name": "build1"},
{"type": "parallel-test", "name": "build2"},
{"type": "parallel-test", "name": "build3"},
{"type": "parallel-test", "name": "build4"},
{"type": "parallel-test", "name": "build5"}
]
}

View File

@ -1,127 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"sync"
)
// waiter is one queued Acquire request: the weight it wants and the channel
// that Release closes once that weight has been granted to it.
type waiter struct {
	n     int64
	ready chan<- struct{} // Closed when semaphore acquired.
}
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
	return &Weighted{size: n}
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight that may be held
	cur     int64      // combined weight currently acquired
	mu      sync.Mutex // guards cur and waiters
	waiters list.List  // FIFO of waiter; Release serves it front-first
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: enough free weight and nobody queued ahead of us.
	// Checking waiters.Len() preserves FIFO fairness for queued requests.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	// Slow path: enqueue ourselves and wait for Release to close `ready`.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			// Not granted yet: dequeue so Release never hands us weight.
			s.waiters.Remove(elem)
		}
		s.mu.Unlock()
		return err

	case <-ready:
		return nil
	}
}
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the
// semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Fail if there isn't enough free weight, or if anyone is already
	// queued (preserving FIFO fairness with blocking Acquire calls).
	if s.waiters.Len() != 0 || s.size-s.cur < n {
		return false
	}
	s.cur += n
	return true
}
// Release releases the semaphore with a weight of n and wakes as many
// queued waiters (front-first) as the freed weight can satisfy.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		// Releasing more than was acquired is a caller bug; unlock first so
		// the semaphore isn't left locked when we panic.
		s.mu.Unlock()
		panic("semaphore: bad release")
	}
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}

		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}

		// Grant this waiter its weight and signal it by closing ready.
		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
	s.mu.Unlock()
}