Merge pull request #5137 from vilkaspilkas/f-googlecompute-accelerator
Add accelerator api support to googlecompute builder
commit 3974976518
@@ -26,6 +26,8 @@ type Config struct {
 	AccountFile string `mapstructure:"account_file"`
 	ProjectId string `mapstructure:"project_id"`
 
+	AcceleratorType string `mapstructure:"accelerator_type"`
+	AcceleratorCount int64 `mapstructure:"accelerator_count"`
 	Address string `mapstructure:"address"`
 	DiskName string `mapstructure:"disk_name"`
 	DiskSizeGb int64 `mapstructure:"disk_size"`
@@ -205,6 +207,14 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 		errs = packer.MultiErrorAppend(fmt.Errorf("'use_internal_ip' must be true if 'omit_external_ip' is true"))
 	}
 
+	if c.AcceleratorCount > 0 && len(c.AcceleratorType) == 0 {
+		errs = packer.MultiErrorAppend(fmt.Errorf("'accelerator_type' must be set when 'accelerator_count' is more than 0"))
+	}
+
+	if c.AcceleratorCount > 0 && c.OnHostMaintenance != "TERMINATE" {
+		errs = packer.MultiErrorAppend(fmt.Errorf("'on_host_maintenance' must be set to 'TERMINATE' when 'accelerator_count' is more than 0"))
+	}
+
 	// Check for any errors.
 	if errs != nil && len(errs.Errors) > 0 {
 		return nil, nil, errs

@@ -1,6 +1,7 @@
 package googlecompute
 
 import (
+	"fmt"
 	"io/ioutil"
 	"strings"
 	"testing"
@@ -190,6 +191,65 @@ func TestConfigPrepare(t *testing.T) {
 	}
 }
 
+func TestConfigPrepareAccelerator(t *testing.T) {
+	cases := []struct {
+		Keys []string
+		Values []interface{}
+		Err bool
+	}{
+		{
+			[]string{"accelerator_count", "on_host_maintenance", "accelerator_type"},
+			[]interface{}{1, "MIGRATE", "something_valid"},
+			true,
+		},
+		{
+			[]string{"accelerator_count", "on_host_maintenance", "accelerator_type"},
+			[]interface{}{1, "TERMINATE", "something_valid"},
+			false,
+		},
+		{
+			[]string{"accelerator_count", "on_host_maintenance", "accelerator_type"},
+			[]interface{}{1, "TERMINATE", nil},
+			true,
+		},
+		{
+			[]string{"accelerator_count", "on_host_maintenance", "accelerator_type"},
+			[]interface{}{1, "TERMINATE", ""},
+			true,
+		},
+		{
+			[]string{"accelerator_count", "on_host_maintenance", "accelerator_type"},
+			[]interface{}{1, "TERMINATE", "something_valid"},
+			false,
+		},
+	}
+
+	for _, tc := range cases {
+		raw := testConfig(t)
+
+		errStr := ""
+		for k := range tc.Keys {
+
+			// Create the string for error reporting
+			// convert value to string if it can be converted
+			errStr += fmt.Sprintf("%s:%v, ", tc.Keys[k], tc.Values[k])
+			if tc.Values[k] == nil {
+				delete(raw, tc.Keys[k])
+			} else {
+				raw[tc.Keys[k]] = tc.Values[k]
+			}
+		}
+
+		_, warns, errs := NewConfig(raw)
+
+		if tc.Err {
+			testConfigErr(t, warns, errs, strings.TrimRight(errStr, ", "))
+		} else {
+			testConfigOk(t, warns, errs)
+		}
+	}
+}
+
 func TestConfigDefaults(t *testing.T) {
 	cases := []struct {
 		Read func(c *Config) interface{}

@@ -58,6 +58,8 @@ type Driver interface {
 }
 
 type InstanceConfig struct {
+	AcceleratorType string
+	AcceleratorCount int64
 	Address string
 	Description string
 	DiskSizeGb int64

@@ -380,6 +380,15 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
 		})
 	}
 
+	var guestAccelerators []*compute.AcceleratorConfig
+	if c.AcceleratorCount > 0 {
+		ac := &compute.AcceleratorConfig{
+			AcceleratorCount: c.AcceleratorCount,
+			AcceleratorType: c.AcceleratorType,
+		}
+		guestAccelerators = append(guestAccelerators, ac)
+	}
+
 	// Create the instance information
 	instance := compute.Instance{
 		Description: c.Description,
@@ -397,7 +406,8 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
 				},
 			},
 		},
-		MachineType: machineType.SelfLink,
+		GuestAccelerators: guestAccelerators,
+		MachineType: machineType.SelfLink,
 		Metadata: &compute.Metadata{
 			Items: metadata,
 		},

@@ -99,6 +99,8 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction
 	var metadata map[string]string
 	metadata, err = c.createInstanceMetadata(sourceImage, sshPublicKey)
 	errCh, err = d.RunInstance(&InstanceConfig{
+		AcceleratorType: c.AcceleratorType,
+		AcceleratorCount: c.AcceleratorCount,
 		Address: c.Address,
 		Description: "New instance created by Packer",
 		DiskSizeGb: c.DiskSizeGb,

[Two file diffs suppressed because they are too large.]

@@ -11,8 +11,8 @@ import (
 	"google.golang.org/api/googleapi"
 )
 
-// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
-type ResumableBuffer struct {
+// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
+type MediaBuffer struct {
 	media io.Reader
 
 	chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
@@ -22,42 +22,42 @@ type ResumableBuffer struct {
 	off int64
 }
 
-func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer {
-	return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
+func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
+	return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
 }
 
 // Chunk returns the current buffered chunk, the offset in the underlying media
 // from which the chunk is drawn, and the size of the chunk.
 // Successive calls to Chunk return the same chunk between calls to Next.
-func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
+func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
 	// There may already be data in chunk if Next has not been called since the previous call to Chunk.
-	if rb.err == nil && len(rb.chunk) == 0 {
-		rb.err = rb.loadChunk()
+	if mb.err == nil && len(mb.chunk) == 0 {
+		mb.err = mb.loadChunk()
 	}
-	return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err
+	return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
 }
 
 // loadChunk will read from media into chunk, up to the capacity of chunk.
-func (rb *ResumableBuffer) loadChunk() error {
-	bufSize := cap(rb.chunk)
-	rb.chunk = rb.chunk[:bufSize]
+func (mb *MediaBuffer) loadChunk() error {
+	bufSize := cap(mb.chunk)
+	mb.chunk = mb.chunk[:bufSize]
 
 	read := 0
 	var err error
 	for err == nil && read < bufSize {
 		var n int
-		n, err = rb.media.Read(rb.chunk[read:])
+		n, err = mb.media.Read(mb.chunk[read:])
 		read += n
 	}
-	rb.chunk = rb.chunk[:read]
+	mb.chunk = mb.chunk[:read]
 	return err
 }
 
 // Next advances to the next chunk, which will be returned by the next call to Chunk.
 // Calls to Next without a corresponding prior call to Chunk will have no effect.
-func (rb *ResumableBuffer) Next() {
-	rb.off += int64(len(rb.chunk))
-	rb.chunk = rb.chunk[0:0]
+func (mb *MediaBuffer) Next() {
+	mb.off += int64(len(mb.chunk))
+	mb.chunk = mb.chunk[0:0]
 }
 
 type readerTyper struct {

@@ -0,0 +1,22 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// GoogleClientHeader returns the value to use for the x-goog-api-client
+// header, which is used internally by Google.
+func GoogleClientHeader(generatorVersion, clientElement string) string {
+	elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)}
+	if clientElement != "" {
+		elts = append(elts, clientElement)
+	}
+	elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion))
+	return strings.Join(elts, " ")
+}

@@ -12,29 +12,43 @@ import (
 )
 
 // MarshalJSON returns a JSON encoding of schema containing only selected fields.
-// A field is selected if:
-// * it has a non-empty value, or
-// * its field name is present in forceSendFields, and
-// * it is not a nil pointer or nil interface.
+// A field is selected if any of the following is true:
+// * it has a non-empty value
+// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
+// * its field name is present in nullFields.
 // The JSON key for each selected field is taken from the field's json: struct tag.
-func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
-	if len(forceSendFields) == 0 {
+func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
+	if len(forceSendFields) == 0 && len(nullFields) == 0 {
 		return json.Marshal(schema)
 	}
 
-	mustInclude := make(map[string]struct{})
+	mustInclude := make(map[string]bool)
 	for _, f := range forceSendFields {
-		mustInclude[f] = struct{}{}
+		mustInclude[f] = true
 	}
+	useNull := make(map[string]bool)
+	useNullMaps := make(map[string]map[string]bool)
+	for _, nf := range nullFields {
+		parts := strings.SplitN(nf, ".", 2)
+		field := parts[0]
+		if len(parts) == 1 {
+			useNull[field] = true
+		} else {
+			if useNullMaps[field] == nil {
+				useNullMaps[field] = map[string]bool{}
+			}
+			useNullMaps[field][parts[1]] = true
+		}
+	}
 
-	dataMap, err := schemaToMap(schema, mustInclude)
+	dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
 	if err != nil {
 		return nil, err
 	}
 	return json.Marshal(dataMap)
 }
 
-func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
+func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
 	m := make(map[string]interface{})
 	s := reflect.ValueOf(schema)
 	st := s.Type()
@@ -54,10 +68,36 @@ func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[strin
 
 		v := s.Field(i)
 		f := st.Field(i)
 
+		if useNull[f.Name] {
+			if !isEmptyValue(v) {
+				return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
+			}
+			m[tag.apiName] = nil
+			continue
+		}
+
 		if !includeField(v, f, mustInclude) {
 			continue
 		}
+
+		// If map fields are explicitly set to null, use a map[string]interface{}.
+		if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
+			ms, ok := v.Interface().(map[string]string)
+			if !ok {
+				return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
+			}
+			mi := map[string]interface{}{}
+			for k, v := range ms {
+				mi[k] = v
+			}
+			for k := range useNullMaps[f.Name] {
+				mi[k] = nil
+			}
+			m[tag.apiName] = mi
+			continue
+		}
+
 		// nil maps are treated as empty maps.
 		if f.Type.Kind() == reflect.Map && v.IsNil() {
 			m[tag.apiName] = map[string]string{}
@@ -127,7 +167,7 @@ func parseJSONTag(val string) (jsonTag, error) {
 }
 
 // Reports whether the struct field "f" with value "v" should be included in JSON output.
-func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
+func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
 	// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
 	// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
 	// However, many fields are not pointers, so there would be no way to delete these fields.
@@ -144,8 +184,7 @@ func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string
 		return false
 	}
 
-	_, ok := mustInclude[f.Name]
-	return ok || !isEmptyValue(v)
+	return mustInclude[f.Name] || !isEmptyValue(v)
 }
 
 // isEmptyValue reports whether v is the empty value for its type. This

@@ -0,0 +1,57 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gensupport
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+)
+
+// JSONFloat64 is a float64 that supports proper unmarshaling of special float
+// values in JSON, according to
+// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
+// that is a proto-to-JSON spec, it applies to all Google APIs.
+//
+// The jsonpb package
+// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
+// similar functionality, but only for direct translation from proto messages
+// to JSON.
+type JSONFloat64 float64
+
+func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
+	var ff float64
+	if err := json.Unmarshal(data, &ff); err == nil {
+		*f = JSONFloat64(ff)
+		return nil
+	}
+	var s string
+	if err := json.Unmarshal(data, &s); err == nil {
+		switch s {
+		case "NaN":
+			ff = math.NaN()
+		case "Infinity":
+			ff = math.Inf(1)
+		case "-Infinity":
+			ff = math.Inf(-1)
+		default:
+			return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
+		}
+		*f = JSONFloat64(ff)
+		return nil
+	}
+	return errors.New("google.golang.org/api/internal: data not float or string")
+}

@@ -176,25 +176,24 @@ func typeHeader(contentType string) textproto.MIMEHeader {
 // chunkSize is the size of the chunk that media should be split into.
 // If chunkSize is non-zero and the contents of media do not fit in a single
 // chunk (or there is an error reading media), then media will be returned as a
-// ResumableBuffer. Otherwise, media will be returned as a Reader.
+// MediaBuffer. Otherwise, media will be returned as a Reader.
 //
 // After PrepareUpload has been called, media should no longer be used: the
 // media content should be accessed via one of the return values.
-func PrepareUpload(media io.Reader, chunkSize int) (io.Reader,
-	*ResumableBuffer) {
+func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) {
 	if chunkSize == 0 { // do not chunk
 		return media, nil
 	}
 
-	rb := NewResumableBuffer(media, chunkSize)
-	rdr, _, _, err := rb.Chunk()
+	mb := NewMediaBuffer(media, chunkSize)
+	rdr, _, _, err := mb.Chunk()
 
 	if err == io.EOF { // we can upload this in a single request
 		return rdr, nil
 	}
-	// err might be a non-EOF error. If it is, the next call to rb.Chunk will
-	// return the same error. Returning a ResumableBuffer ensures that this error
+	// err might be a non-EOF error. If it is, the next call to mb.Chunk will
+	// return the same error. Returning a MediaBuffer ensures that this error
 	// will be handled at some point.
 
-	return nil, rb
+	return nil, mb
 }

@@ -5,6 +5,7 @@
 package gensupport
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -12,14 +13,9 @@ import (
 	"time"
 
 	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"
 )
 
 const (
-	// statusResumeIncomplete is the code returned by the Google uploader
-	// when the transfer is not yet complete.
-	statusResumeIncomplete = 308
-
 	// statusTooManyRequests is returned by the storage API if the
 	// per-project limits have been temporarily exceeded. The request
 	// should be retried.
@@ -35,7 +31,7 @@ type ResumableUpload struct {
 	URI string
 	UserAgent string // User-Agent for header of the request
 	// Media is the object being uploaded.
-	Media *ResumableBuffer
+	Media *MediaBuffer
 	// MediaType defines the media type, e.g. "image/jpeg".
 	MediaType string
 
@@ -80,8 +76,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader,
 	req.Header.Set("Content-Range", contentRange)
 	req.Header.Set("Content-Type", rx.MediaType)
 	req.Header.Set("User-Agent", rx.UserAgent)
-	return ctxhttp.Do(ctx, rx.Client, req)
 
+	// Google's upload endpoint uses status code 308 for a
+	// different purpose than the "308 Permanent Redirect"
+	// since-standardized in RFC 7238. Because of the conflict in
+	// semantics, Google added this new request header which
+	// causes it to not use "308" and instead reply with 200 OK
+	// and sets the upload-specific "X-HTTP-Status-Code-Override:
+	// 308" response header.
+	req.Header.Set("X-GUploader-No-308", "yes")
+
+	return SendRequest(ctx, rx.Client, req)
 }
+
+func statusResumeIncomplete(resp *http.Response) bool {
+	// This is how the server signals "status resume incomplete"
+	// when X-GUploader-No-308 is set to "yes":
+	return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
+}
 
 // reportProgress calls a user-supplied callback to report upload progress.
@@ -112,11 +123,17 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e
 		return res, err
 	}
 
-	if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
+	// We sent "X-GUploader-No-308: yes" (see comment elsewhere in
+	// this file), so we don't expect to get a 308.
+	if res.StatusCode == 308 {
+		return nil, errors.New("unexpected 308 response status code")
+	}
+
+	if res.StatusCode == http.StatusOK {
 		rx.reportProgress(off, off+int64(size))
 	}
 
-	if res.StatusCode == statusResumeIncomplete {
+	if statusResumeIncomplete(res) {
 		rx.Media.Next()
 	}
 	return res, nil
@@ -135,6 +152,8 @@ func contextDone(ctx context.Context) bool {
 // It retries using the provided back off strategy until cancelled or the
 // strategy indicates to stop retrying.
 // It is called from the auto-generated API code and is not visible to the user.
+// Before sending an HTTP request, Upload calls any registered hook functions,
+// and calls the returned functions after the request returns (see send.go).
 // rx is private to the auto-generated API code.
 // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
 func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
@@ -176,7 +195,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
 
 		// If the chunk was uploaded successfully, but there's still
 		// more to go, upload the next chunk without any delay.
-		if status == statusResumeIncomplete {
+		if statusResumeIncomplete(resp) {
 			pause = 0
 			backoff.Reset()
 			resp.Body.Close()

@@ -1,3 +1,17 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package gensupport
 
 import (
@@ -55,23 +69,17 @@ func DefaultBackoffStrategy() BackoffStrategy {
 // shouldRetry returns true if the HTTP response / error indicates that the
 // request should be attempted again.
 func shouldRetry(status int, err error) bool {
-	// Retry for 5xx response codes.
-	if 500 <= status && status < 600 {
+	if 500 <= status && status <= 599 {
 		return true
 	}
-
-	// Retry on statusTooManyRequests{
 	if status == statusTooManyRequests {
 		return true
 	}
-
-	// Retry on unexpected EOFs and temporary network errors.
 	if err == io.ErrUnexpectedEOF {
 		return true
 	}
 	if err, ok := err.(net.Error); ok {
 		return err.Temporary()
 	}
-
 	return false
 }

@@ -0,0 +1,55 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+// Hook is the type of a function that is called once before each HTTP request
+// that is sent by a generated API. It returns a function that is called after
+// the request returns.
+// Hooks are not called if the context is nil.
+type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)
+
+var hooks []Hook
+
+// RegisterHook registers a Hook to be called before each HTTP request by a
+// generated API. Hooks are called in the order they are registered. Each
+// hook can return a function; if it is non-nil, it is called after the HTTP
+// request returns. These functions are called in the reverse order.
+// RegisterHook should not be called concurrently with itself or SendRequest.
+func RegisterHook(h Hook) {
+	hooks = append(hooks, h)
+}
+
+// SendRequest sends a single HTTP request using the given client.
+// If ctx is non-nil, it calls all hooks, then sends the request with
+// ctxhttp.Do, then calls any functions returned by the hooks in reverse order.
+func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if ctx == nil {
+		return client.Do(req)
+	}
+	// Call hooks in order of registration, store returned funcs.
+	post := make([]func(resp *http.Response), len(hooks))
+	for i, h := range hooks {
+		fn := h(ctx, req)
+		post[i] = fn
+	}
+
+	// Send request.
+	resp, err := ctxhttp.Do(ctx, client, req)
+
+	// Call returned funcs in reverse order.
+	for i := len(post) - 1; i >= 0; i-- {
+		if fn := post[i]; fn != nil {
+			fn(resp)
+		}
+	}
+	return resp, err
+}

@@ -1339,14 +1339,16 @@
 			"revisionTime": "2017-07-04T19:41:35Z"
 		},
 		{
-			"checksumSHA1": "hrUTmck0J+LE+lBtCvHvemNDY8U=",
+			"checksumSHA1": "NbZXrCwb2NvwV+g6yF0NXSG63A0=",
 			"path": "google.golang.org/api/compute/v1",
-			"revision": "ff0a1ff302946b997eb1832381419d1f95143483"
+			"revision": "e381d638237fe32daa8eaa3321cf8db7b8203965",
+			"revisionTime": "2017-07-07T17:19:04Z"
 		},
 		{
-			"checksumSHA1": "0PgeEtxSD/Vg/53Vctpf0Lk9V/k=",
+			"checksumSHA1": "gvrxuXnqGhfzY0O3MFbS8XhMH/k=",
 			"path": "google.golang.org/api/gensupport",
-			"revision": "ff0a1ff302946b997eb1832381419d1f95143483"
+			"revision": "e665075b5ff79143ba49c58fab02df9dc122afd5",
+			"revisionTime": "2017-07-09T10:32:00Z"
 		},
 		{
 			"checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=",

@@ -174,6 +174,12 @@ builder.
     Not required if you run Packer on a GCE instance with a service account.
     Instructions for creating file or using service accounts are above.
 
+- `accelerator_count` (int) - Number of guest accelerator cards to add to the launched instance.
+
+- `accelerator_type` (string) - Full or partial URL of the guest accelerator type. GPU accelerators can only be used with
+    `"on_host_maintenance": "TERMINATE"` option set.
+    Example: `"projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"`
+
 - `address` (string) - The name of a pre-allocated static external IP address.
     Note, must be the name and not the actual IP address.
 
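Taken together, the new keys are set directly in the builder section of a template. Below is a minimal sketch of a googlecompute builder block using them; the project, zone, image, and credential values are placeholders rather than part of this change, and `on_host_maintenance` is set to `"TERMINATE"` because the new NewConfig validation requires it whenever `accelerator_count` is greater than 0:

{
  "builders": [
    {
      "type": "googlecompute",
      "account_file": "account.json",
      "project_id": "my-project",
      "zone": "europe-west1-b",
      "source_image": "debian-9-stretch-v20170717",
      "ssh_username": "packer",
      "on_host_maintenance": "TERMINATE",
      "accelerator_type": "projects/my-project/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80",
      "accelerator_count": 1
    }
  ]
}

With a template like this, the RunInstance change above would attach a single compute.AcceleratorConfig entry (one NVIDIA Tesla K80 in this example) to the temporary build instance.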