Merge pull request #5015 from hashicorp/telemetry
Add telemetry reporting through checkpoint
commit 31f2e31949

main.go (25 changes)
@@ -73,10 +73,22 @@ func realMain() int {
 	outR, outW := io.Pipe()
 	go copyOutput(outR, doneCh)
 
+	// Enable checkpoint for panic reporting
+	config, err := loadConfig()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Couldn't load config: %s", err)
+		return 1
+	}
+
+	if !config.DisableCheckpoint {
+		packer.CheckpointReporter.Enable(config.DisableCheckpointSignature)
+	}
+
 	// Create the configuration for panicwrap and wrap our executable
 	wrapConfig.Handler = panicHandler(logTempFile)
 	wrapConfig.Writer = io.MultiWriter(logTempFile, logWriter)
 	wrapConfig.Stdout = outW
+	wrapConfig.DetectDuration = 500 * time.Millisecond
 	exitStatus, err := panicwrap.Wrap(&wrapConfig)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Couldn't start Packer: %s", err)
@@ -117,9 +129,11 @@ func wrappedMain() int {
 	log.Printf("Packer Target OS/Arch: %s %s", runtime.GOOS, runtime.GOARCH)
 	log.Printf("Built with Go Version: %s", runtime.Version())
 
+	inPlugin := os.Getenv(plugin.MagicCookieKey) == plugin.MagicCookieValue
+
 	// Prepare stdin for plugin usage by switching it to a pipe
 	// But do not switch to pipe in plugin
-	if os.Getenv(plugin.MagicCookieKey) != plugin.MagicCookieValue {
+	if !inPlugin {
 		setupStdin()
 	}
 
@@ -132,6 +146,9 @@ func wrappedMain() int {
 
 	// Fire off the checkpoint.
 	go runCheckpoint(config)
+	if !config.DisableCheckpoint {
+		packer.CheckpointReporter.Enable(config.DisableCheckpointSignature)
+	}
 
 	cacheDir := os.Getenv("PACKER_CACHE_DIR")
 	if cacheDir == "" {
@@ -196,6 +213,12 @@ func wrappedMain() int {
 	}
 
 	exitCode, err := cli.Run()
+	if !inPlugin {
+		if err := packer.CheckpointReporter.Finalize(cli.Subcommand(), exitCode, err); err != nil {
+			log.Printf("[WARN] (telemetry) Error finalizing report. This is safe to ignore. %s", err.Error())
+		}
+	}
+
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err)
 		return 1
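The main.go hunks above bracket a run with the new reporter: realMain enables it (and gives panicwrap a detect duration), and wrappedMain finalizes the report after cli.Run unless Packer is running as a plugin. A minimal sketch of that enable/run/finalize shape, using only the CheckpointReporter API added in this commit; runWithTelemetry and runCLI are illustrative names, not part of the diff:

    package main

    import (
        "log"

        "github.com/hashicorp/packer/packer"
    )

    // runWithTelemetry is a hypothetical distillation of the wiring above:
    // enable the reporter if the config allows it, run the CLI, then finalize.
    func runWithTelemetry(disableCheckpoint, disableSignature bool,
        runCLI func() (cmd string, exitCode int, err error)) int {

        if !disableCheckpoint {
            packer.CheckpointReporter.Enable(disableSignature)
        }

        cmd, exitCode, err := runCLI()

        // Finalize posts the report; failures are logged and ignored, as in the diff.
        if ferr := packer.CheckpointReporter.Finalize(cmd, exitCode, err); ferr != nil {
            log.Printf("[WARN] (telemetry) Error finalizing report. This is safe to ignore. %s", ferr)
        }
        return exitCode
    }

    func main() {
        code := runWithTelemetry(false, false, func() (string, int, error) {
            return "build", 0, nil // stand-in for cli.Run()
        })
        log.Printf("exit code: %d", code)
    }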
@@ -115,6 +115,7 @@ type coreBuildPostProcessor struct {
 // Keeps track of the provisioner and the configuration of the provisioner
 // within the build.
 type coreBuildProvisioner struct {
+	pType       string
 	provisioner Provisioner
 	config      []interface{}
 }
@@ -194,8 +195,10 @@ func (b *coreBuild) Run(originalUi Ui, cache Cache) ([]Artifact, error) {
 	// Add a hook for the provisioners if we have provisioners
 	if len(b.provisioners) > 0 {
 		provisioners := make([]Provisioner, len(b.provisioners))
+		provisionerTypes := make([]string, len(b.provisioners))
 		for i, p := range b.provisioners {
 			provisioners[i] = p.provisioner
+			provisionerTypes[i] = p.pType
 		}
 
 		if _, ok := hooks[HookProvision]; !ok {
@@ -204,6 +207,7 @@ func (b *coreBuild) Run(originalUi Ui, cache Cache) ([]Artifact, error) {
 
 		hooks[HookProvision] = append(hooks[HookProvision], &ProvisionHook{
 			Provisioners:     provisioners,
+			ProvisionerTypes: provisionerTypes,
 		})
 	}
 
@@ -217,7 +221,9 @@ func (b *coreBuild) Run(originalUi Ui, cache Cache) ([]Artifact, error) {
 	}
 
 	log.Printf("Running builder: %s", b.builderType)
+	ts := CheckpointReporter.AddSpan(b.builderType, "builder")
 	builderArtifact, err := b.builder.Run(builderUi, hook, cache)
+	ts.End(err)
 	if err != nil {
 		return nil, err
 	}
@@ -242,7 +248,9 @@ PostProcessorRunSeqLoop:
 			}
 
 			builderUi.Say(fmt.Sprintf("Running post-processor: %s", corePP.processorType))
+			ts := CheckpointReporter.AddSpan(corePP.processorType, "post-processor")
 			artifact, keep, err := corePP.processor.PostProcess(ppUi, priorArtifact)
+			ts.End(err)
 			if err != nil {
 				errors = append(errors, fmt.Errorf("Post-processor failed: %s", err))
 				continue PostProcessorRunSeqLoop
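The pattern added around the builder and post-processor calls above (and around provisioners further down) is the same three lines each time: open a span named after the plugin, run it, close the span with the resulting error. A hedged sketch of that pattern from outside the packer package; withSpan and runPlugin are illustrative names standing in for any builder, provisioner, or post-processor invocation:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/packer/packer"
    )

    // withSpan is a hypothetical helper mirroring the diff: AddSpan before the
    // plugin runs, End afterwards with whatever error came back.
    func withSpan(name, pluginType string, runPlugin func() error) error {
        ts := packer.CheckpointReporter.AddSpan(name, pluginType)
        err := runPlugin()
        ts.End(err) // records the end time and, if non-nil, the error string
        return err
    }

    func main() {
        err := withSpan("amazon-ebs", "builder", func() error {
            return errors.New("example failure") // stand-in for builder.Run
        })
        fmt.Println("builder finished:", err)
    }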
@@ -15,7 +15,7 @@ func testBuild() *coreBuild {
 			"foo": {&MockHook{}},
 		},
 		provisioners: []coreBuildProvisioner{
-			{&MockProvisioner{}, []interface{}{42}},
+			{"mock-provisioner", &MockProvisioner{}, []interface{}{42}},
 		},
 		postProcessors: [][]coreBuildPostProcessor{
 			{
@@ -154,6 +154,7 @@ func (c *Core) Build(n string) (Build, error) {
 		}
 
 		provisioners = append(provisioners, coreBuildProvisioner{
+			pType:       rawP.Type,
 			provisioner: provisioner,
 			config:      config,
 		})
@@ -31,6 +31,7 @@ type ProvisionHook struct {
 	// The provisioners to run as part of the hook. These should already
 	// be prepared (by calling Prepare) at some earlier stage.
 	Provisioners []Provisioner
+	ProvisionerTypes []string
 
 	lock               sync.Mutex
 	runningProvisioner Provisioner
@@ -57,12 +58,15 @@ func (h *ProvisionHook) Run(name string, ui Ui, comm Communicator, data interfac
 		h.runningProvisioner = nil
 	}()
 
-	for _, p := range h.Provisioners {
+	for i, p := range h.Provisioners {
 		h.lock.Lock()
 		h.runningProvisioner = p
 		h.lock.Unlock()
 
-		if err := p.Provision(ui, comm); err != nil {
+		ts := CheckpointReporter.AddSpan(h.ProvisionerTypes[i], "provisioner")
+		err := p.Provision(ui, comm)
+		ts.End(err)
+		if err != nil {
 			return err
 		}
 	}
@@ -24,6 +24,7 @@ func TestProvisionHook(t *testing.T) {
 
 	hook := &ProvisionHook{
 		Provisioners:     []Provisioner{pA, pB},
+		ProvisionerTypes: []string{"", ""},
 	}
 
 	hook.Run("foo", ui, comm, data)
@@ -47,6 +48,7 @@ func TestProvisionHook_nilComm(t *testing.T) {
 
 	hook := &ProvisionHook{
 		Provisioners:     []Provisioner{pA, pB},
+		ProvisionerTypes: []string{"", ""},
 	}
 
 	err := hook.Run("foo", ui, comm, data)
@@ -73,6 +75,7 @@ func TestProvisionHook_cancel(t *testing.T) {
 
 	hook := &ProvisionHook{
 		Provisioners:     []Provisioner{p},
+		ProvisionerTypes: []string{""},
 	}
 
 	finished := make(chan struct{})
@@ -0,0 +1,135 @@ (new file)
package packer

import (
	"context"
	"log"
	"os"
	"path/filepath"
	"time"

	checkpoint "github.com/hashicorp/go-checkpoint"
	packerVersion "github.com/hashicorp/packer/version"
)

const TelemetryVersion string = "beta/packer/4"
const TelemetryPanicVersion string = "beta/packer_panic/4"

var CheckpointReporter CheckpointTelemetry

func init() {
	CheckpointReporter.startTime = time.Now().UTC()
}

type PackerReport struct {
	Spans    []*TelemetrySpan `json:"spans"`
	ExitCode int              `json:"exit_code"`
	Error    string           `json:"error"`
	Command  string           `json:"command"`
}

type CheckpointTelemetry struct {
	enabled       bool
	spans         []*TelemetrySpan
	signatureFile string
	startTime     time.Time
}

func (c *CheckpointTelemetry) Enable(disableSignature bool) {
	configDir, err := ConfigDir()
	if err != nil {
		log.Printf("[ERR] Checkpoint telemetry setup error: %s", err)
		return
	}

	signatureFile := ""
	if disableSignature {
		log.Printf("[INFO] Checkpoint telemetry signature disabled")
	} else {
		signatureFile = filepath.Join(configDir, "checkpoint_signature")
	}

	c.signatureFile = signatureFile
	c.enabled = true
}

func (c *CheckpointTelemetry) baseParams(prefix string) *checkpoint.ReportParams {
	version := packerVersion.Version
	if packerVersion.VersionPrerelease != "" {
		version += "-" + packerVersion.VersionPrerelease
	}

	return &checkpoint.ReportParams{
		Product:       "packer",
		SchemaVersion: prefix,
		StartTime:     c.startTime,
		Version:       version,
		RunID:         os.Getenv("PACKER_RUN_UUID"),
		SignatureFile: c.signatureFile,
	}
}

func (c *CheckpointTelemetry) ReportPanic(m string) error {
	if !c.enabled {
		return nil
	}
	panicParams := c.baseParams(TelemetryPanicVersion)
	panicParams.Payload = m
	panicParams.EndTime = time.Now().UTC()

	ctx, cancel := context.WithTimeout(context.Background(), 4500*time.Millisecond)
	defer cancel()

	return checkpoint.Report(ctx, panicParams)
}

func (c *CheckpointTelemetry) AddSpan(name, pluginType string) *TelemetrySpan {
	log.Printf("[TELEMETRY] Starting %s %s", pluginType, name)
	ts := &TelemetrySpan{
		Name:      name,
		Type:      pluginType,
		StartTime: time.Now().UTC(),
	}
	c.spans = append(c.spans, ts)
	return ts
}

func (c *CheckpointTelemetry) Finalize(command string, errCode int, err error) error {
	if !c.enabled {
		return nil
	}

	params := c.baseParams(TelemetryVersion)
	params.EndTime = time.Now().UTC()

	extra := &PackerReport{
		Spans:    c.spans,
		ExitCode: errCode,
		Command:  command,
	}
	if err != nil {
		extra.Error = err.Error()
	}
	params.Payload = extra

	ctx, cancel := context.WithTimeout(context.Background(), 450*time.Millisecond)
	defer cancel()

	return checkpoint.Report(ctx, params)
}

type TelemetrySpan struct {
	Name      string    `json:"name"`
	Type      string    `json:"type"`
	StartTime time.Time `json:"start_time"`
	EndTime   time.Time `json:"end_time"`
	Error     string    `json:"error"`
}

func (s *TelemetrySpan) End(err error) {
	s.EndTime = time.Now().UTC()
	log.Printf("[TELEMETRY] ending %s", s.Name)
	if err != nil {
		s.Error = err.Error()
		log.Printf("[TELEMETRY] ERROR: %s", err.Error())
	}
}
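For reference, a sketch of roughly what Finalize ends up sending: it marshals a PackerReport (spans plus exit code and command) into the checkpoint payload. Only the exported types and JSON tags come from the file above; the values below are illustrative.

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/hashicorp/packer/packer"
    )

    func main() {
        end := time.Now().UTC()
        report := &packer.PackerReport{
            Spans: []*packer.TelemetrySpan{{
                Name:      "amazon-ebs", // example builder name
                Type:      "builder",
                StartTime: end.Add(-90 * time.Second),
                EndTime:   end,
            }},
            ExitCode: 0,
            Command:  "build",
        }

        // Print the JSON shape of the telemetry payload.
        b, err := json.MarshalIndent(report, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }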
panic.go (5 changes)
@@ -6,6 +6,7 @@ import (
 	"os"
 	"strings"
 
+	"github.com/hashicorp/packer/packer"
 	"github.com/mitchellh/panicwrap"
 )
 
@@ -34,6 +35,10 @@ func panicHandler(logF *os.File) panicwrap.HandlerFunc {
 		// shown in case anything below fails.
 		fmt.Fprintf(os.Stderr, fmt.Sprintf("%s\n", m))
 
+		if err := packer.CheckpointReporter.ReportPanic(m); err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to report panic. This is safe to ignore: %s", err)
+		}
+
 		// Create the crash log file where we'll write the logs
 		f, err := os.Create("crash.log")
 		if err != nil {
@@ -0,0 +1,70 @@ (new file)
// Package endpoints validates regional endpoints for services.
package endpoints

//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go

import (
	"fmt"
	"regexp"
	"strings"
)

// NormalizeEndpoint takes and endpoint and service API information to return a
// normalized endpoint and signing region. If the endpoint is not an empty string
// the service name and region will be used to look up the service's API endpoint.
// If the endpoint is provided the scheme will be added if it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
	if endpoint == "" {
		return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
	}

	return AddScheme(endpoint, disableSSL), ""
}

// EndpointForRegion returns an endpoint and its signing region for a service and region.
// if the service and region pair are not found endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
	dualStackField := ""
	if useDualStack {
		dualStackField = "/dualstack"
	}

	derivedKeys := []string{
		region + "/" + svcName + dualStackField,
		region + "/*" + dualStackField,
		"*/" + svcName + dualStackField,
		"*/*" + dualStackField,
	}

	for _, key := range derivedKeys {
		if val, ok := endpointsMap.Endpoints[key]; ok {
			ep := val.Endpoint
			ep = strings.Replace(ep, "{region}", region, -1)
			ep = strings.Replace(ep, "{service}", svcName, -1)

			endpoint = ep
			signingRegion = val.SigningRegion
			break
		}
	}

	return AddScheme(endpoint, disableSSL), signingRegion
}

// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")

// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
	if endpoint != "" && !schemeRE.MatchString(endpoint) {
		scheme := "https"
		if disableSSL {
			scheme = "http"
		}
		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
	}

	return endpoint
}
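EndpointForRegion above resolves a service/region pair by trying progressively more generic keys (region/service, region/*, */service, */*, each with an optional /dualstack suffix) against the generated map. A hedged sketch of what those helpers return for a few inputs, based on the table in endpoints.json below; the import path is the one vendored here, and a real application would normally go through the SDK's public configuration instead:

    package main

    import (
        "fmt"

        // Private SDK package as vendored in this commit; illustrative only.
        "github.com/aws/aws-sdk-go/private/endpoints"
    )

    func main() {
        // "us-east-1/s3" has an explicit entry, so the lookup stops there.
        ep, signingRegion := endpoints.EndpointForRegion("s3", "us-east-1", false, false)
        fmt.Println(ep, signingRegion) // https://s3.amazonaws.com, empty signing region

        // "eu-west-1/ec2" has no specific entry and falls through to "*/*".
        ep, signingRegion = endpoints.EndpointForRegion("ec2", "eu-west-1", false, false)
        fmt.Println(ep, signingRegion) // https://ec2.eu-west-1.amazonaws.com

        // AddScheme only prepends a scheme when one is missing.
        fmt.Println(endpoints.AddScheme("example.com", true)) // http://example.com
    }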
@@ -0,0 +1,78 @@ (new file)
{
  "version": 2,
  "endpoints": {
    "*/*": {
      "endpoint": "{service}.{region}.amazonaws.com"
    },
    "cn-north-1/*": {
      "endpoint": "{service}.{region}.amazonaws.com.cn",
      "signatureVersion": "v4"
    },
    "cn-north-1/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest"
    },
    "us-gov-west-1/iam": {
      "endpoint": "iam.us-gov.amazonaws.com"
    },
    "us-gov-west-1/sts": {
      "endpoint": "sts.us-gov-west-1.amazonaws.com"
    },
    "us-gov-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "us-gov-west-1/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest"
    },
    "*/cloudfront": {
      "endpoint": "cloudfront.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/cloudsearchdomain": {
      "endpoint": "",
      "signingRegion": "us-east-1"
    },
    "*/data.iot": {
      "endpoint": "",
      "signingRegion": "us-east-1"
    },
    "*/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest"
    },
    "*/iam": {
      "endpoint": "iam.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/importexport": {
      "endpoint": "importexport.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/route53": {
      "endpoint": "route53.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/sts": {
      "endpoint": "sts.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/waf": {
      "endpoint": "waf.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "us-east-1/sdb": {
      "endpoint": "sdb.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "*/s3/dualstack": {
      "endpoint": "s3.dualstack.{region}.amazonaws.com"
    },
    "us-east-1/s3": {
      "endpoint": "s3.amazonaws.com"
    },
    "eu-central-1/s3": {
      "endpoint": "{service}.{region}.amazonaws.com"
    }
  }
}
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go (91 lines, generated, vendored, new file)
@@ -0,0 +1,91 @@
package endpoints

// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

type endpointStruct struct {
	Version   int
	Endpoints map[string]endpointEntry
}

type endpointEntry struct {
	Endpoint      string
	SigningRegion string
}

var endpointsMap = endpointStruct{
	Version: 2,
	Endpoints: map[string]endpointEntry{
		"*/*": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"*/cloudfront": {
			Endpoint:      "cloudfront.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/cloudsearchdomain": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/data.iot": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"*/iam": {
			Endpoint:      "iam.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/importexport": {
			Endpoint:      "importexport.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/route53": {
			Endpoint:      "route53.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"*/s3/dualstack": {
			Endpoint: "s3.dualstack.{region}.amazonaws.com",
		},
		"*/sts": {
			Endpoint:      "sts.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/waf": {
			Endpoint:      "waf.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"cn-north-1/*": {
			Endpoint: "{service}.{region}.amazonaws.com.cn",
		},
		"cn-north-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"eu-central-1/s3": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"us-east-1/s3": {
			Endpoint: "s3.amazonaws.com",
		},
		"us-east-1/sdb": {
			Endpoint:      "sdb.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"us-gov-west-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"us-gov-west-1/iam": {
			Endpoint: "iam.us-gov.amazonaws.com",
		},
		"us-gov-west-1/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"us-gov-west-1/sts": {
			Endpoint: "sts.us-gov-west-1.amazonaws.com",
		},
	},
}
@@ -1,7 +1,7 @@
 # Go Checkpoint Client
 
 [Checkpoint](http://checkpoint.hashicorp.com) is an internal service at
-Hashicorp that we use to check version information, broadcoast security
+Hashicorp that we use to check version information, broadcast security
 bulletins, etc.
 
 We understand that software making remote calls over the internet
@@ -10,7 +10,7 @@ disabled in all of our software that includes it. You can view the source
 of this client to see that we're not sending any private information.
 
 Each Hashicorp application has it's specific configuration option
-to disable chekpoint calls, but the `CHECKPOINT_DISABLE` makes
+to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes
 the underlying checkpoint component itself disabled. For example
 in the case of packer:
 ```
@@ -3,6 +3,8 @@
 package checkpoint
 
 import (
+	"bytes"
+	"context"
 	"crypto/rand"
 	"encoding/binary"
 	"encoding/json"
@@ -20,10 +22,117 @@ import (
 	"time"
 
 	"github.com/hashicorp/go-cleanhttp"
+	uuid "github.com/hashicorp/go-uuid"
 )
 
 var magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB}
 
+// ReportParams are the parameters for configuring a telemetry report.
+type ReportParams struct {
+	// Signature is some random signature that should be stored and used
+	// as a cookie-like value. This ensures that alerts aren't repeated.
+	// If the signature is changed, repeat alerts may be sent down. The
+	// signature should NOT be anything identifiable to a user (such as
+	// a MAC address). It should be random.
+	//
+	// If SignatureFile is given, then the signature will be read from this
+	// file. If the file doesn't exist, then a random signature will
+	// automatically be generated and stored here. SignatureFile will be
+	// ignored if Signature is given.
+	Signature     string `json:"signature"`
+	SignatureFile string `json:"-"`
+
+	StartTime     time.Time   `json:"start_time"`
+	EndTime       time.Time   `json:"end_time"`
+	Arch          string      `json:"arch"`
+	Args          []string    `json:"args"`
+	OS            string      `json:"os"`
+	Payload       interface{} `json:"payload,omitempty"`
+	Product       string      `json:"product"`
+	RunID         string      `json:"run_id"`
+	SchemaVersion string      `json:"schema_version"`
+	Version       string      `json:"version"`
+}
+
+func (i *ReportParams) signature() string {
+	signature := i.Signature
+	if i.Signature == "" && i.SignatureFile != "" {
+		var err error
+		signature, err = checkSignature(i.SignatureFile)
+		if err != nil {
+			return ""
+		}
+	}
+	return signature
+}
+
+// Report sends telemetry information to checkpoint
+func Report(ctx context.Context, r *ReportParams) error {
+	if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
+		return nil
+	}
+
+	req, err := ReportRequest(r)
+	if err != nil {
+		return err
+	}
+
+	client := cleanhttp.DefaultClient()
+	resp, err := client.Do(req.WithContext(ctx))
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 201 {
+		return fmt.Errorf("Unknown status: %d", resp.StatusCode)
+	}
+
+	return nil
+}
+
+// ReportRequest creates a request object for making a report
+func ReportRequest(r *ReportParams) (*http.Request, error) {
+	// Populate some fields automatically if we can
+	if r.RunID == "" {
+		uuid, err := uuid.GenerateUUID()
+		if err != nil {
+			return nil, err
+		}
+		r.RunID = uuid
+	}
+	if r.Arch == "" {
+		r.Arch = runtime.GOARCH
+	}
+	if r.OS == "" {
+		r.OS = runtime.GOOS
+	}
+	if len(r.Args) == 0 {
+		r.Args = os.Args
+	}
+	if r.Signature == "" {
+		r.Signature = r.signature()
+	}
+
+	b, err := json.Marshal(r)
+	if err != nil {
+		return nil, err
+	}
+
+	u := &url.URL{
+		Scheme: "https",
+		Host:   "checkpoint-api.hashicorp.com",
+		Path:   fmt.Sprintf("/v1/telemetry/%s", r.Product),
+	}
+
+	req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Accept", "application/json")
+	req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
+
+	return req, nil
+}
+
 // CheckParams are the parameters for configuring a check request.
 type CheckParams struct {
 	// Product and version are used to lookup the correct product and
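The new Report/ReportRequest pair is what Packer's Finalize and ReportPanic call into: fill in ReportParams, marshal them, and POST to checkpoint-api.hashicorp.com, honoring CHECKPOINT_DISABLE. A minimal direct-use sketch; the product, schema, and payload values are illustrative, chosen to mirror how Packer fills them:

    package main

    import (
        "context"
        "log"
        "time"

        checkpoint "github.com/hashicorp/go-checkpoint"
    )

    func main() {
        // Minimal report; Arch, OS, Args, and RunID are filled in by
        // ReportRequest when left empty.
        params := &checkpoint.ReportParams{
            Product:       "packer",
            SchemaVersion: "beta/packer/4",
            StartTime:     time.Now().UTC().Add(-2 * time.Second),
            EndTime:       time.Now().UTC(),
            Payload:       map[string]string{"example": "payload"},
        }

        ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
        defer cancel()

        // Report is a no-op when CHECKPOINT_DISABLE is set; otherwise it
        // POSTs the JSON body and expects a 201 response.
        if err := checkpoint.Report(ctx, params); err != nil {
            log.Printf("telemetry report failed (safe to ignore): %s", err)
        }
    }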
@@ -12,13 +12,16 @@ package panicwrap
 import (
 	"bytes"
 	"errors"
-	"github.com/kardianos/osext"
 	"io"
 	"os"
 	"os/exec"
 	"os/signal"
+	"runtime"
+	"sync/atomic"
 	"syscall"
 	"time"
+
+	"github.com/kardianos/osext"
 )
 
 const (
@@ -61,6 +64,17 @@ type WrapConfig struct {
 	// The writer to send stdout to. If this is nil, then it defaults to
 	// os.Stdout.
 	Stdout io.Writer
+
+	// Catch and igore these signals in the parent process, let the child
+	// handle them gracefully.
+	IgnoreSignals []os.Signal
+
+	// Catch these signals in the parent process and manually forward
+	// them to the child process. Some signals such as SIGINT are usually
+	// sent to the entire process group so setting it isn't necessary. Other
+	// signals like SIGTERM are only sent to the parent process and need
+	// to be forwarded. This defaults to empty.
+	ForwardSignals []os.Signal
 }
 
 // BasicWrap calls Wrap with the given handler function, using defaults
@@ -145,6 +159,13 @@ func Wrap(c *WrapConfig) (int, error) {
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = stdout_w
 	cmd.Stderr = stderr_w
+
+	// Windows doesn't support this, but on other platforms pass in
+	// the original file descriptors so they can be used.
+	if runtime.GOOS != "windows" {
+		cmd.ExtraFiles = []*os.File{os.Stdin, os.Stdout, os.Stderr}
+	}
+
 	if err := cmd.Start(); err != nil {
 		return 1, err
 	}
@@ -152,13 +173,23 @@ func Wrap(c *WrapConfig) (int, error) {
 	// Listen to signals and capture them forever. We allow the child
 	// process to handle them in some way.
 	sigCh := make(chan os.Signal)
-	signal.Notify(sigCh, os.Interrupt)
+	fwdSigCh := make(chan os.Signal)
+	if len(c.IgnoreSignals) == 0 {
+		c.IgnoreSignals = []os.Signal{os.Interrupt}
+	}
+	signal.Notify(sigCh, c.IgnoreSignals...)
+	signal.Notify(fwdSigCh, c.ForwardSignals...)
 	go func() {
 		defer signal.Stop(sigCh)
+		defer signal.Stop(fwdSigCh)
 		for {
 			select {
 			case <-doneCh:
 				return
+			case s := <-fwdSigCh:
+				if cmd.Process != nil {
+					cmd.Process.Signal(s)
+				}
 			case <-sigCh:
 			}
 		}
@@ -200,7 +231,17 @@ func Wrap(c *WrapConfig) (int, error) {
 //
 // Wrapped is very cheap and can be used early to short-circuit some pre-wrap
 // logic your application may have.
+//
+// If the given configuration is nil, then this will return a cached
+// value of Wrapped. This is useful because Wrapped is usually called early
+// to verify a process hasn't been wrapped before wrapping. After this,
+// the value of Wrapped hardly changes and is process-global, so other
+// libraries can check with Wrapped(nil).
 func Wrapped(c *WrapConfig) bool {
+	if c == nil {
+		return wrapCache.Load().(bool)
+	}
+
 	if c.CookieKey == "" {
 		c.CookieKey = DEFAULT_COOKIE_KEY
 	}
@@ -211,7 +252,16 @@ func Wrapped(c *WrapConfig) bool {
 
 	// If the cookie key/value match our environment, then we are the
 	// child, so just exit now and tell the caller that we're the child
-	return os.Getenv(c.CookieKey) == c.CookieValue
+	result := os.Getenv(c.CookieKey) == c.CookieValue
+	wrapCache.Store(result)
+	return result
+}
+
+// wrapCache is the cached value for Wrapped when called with nil
+var wrapCache atomic.Value
+
+func init() {
+	wrapCache.Store(false)
 }
 
 // trackPanic monitors the given reader for a panic. If a panic is detected,
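The panicwrap changes above add ExtraFiles passthrough, a cached Wrapped(nil), and two new WrapConfig fields for signal handling. A hedged sketch of how a wrapper might use IgnoreSignals and ForwardSignals; the handler body and program logic are illustrative:

    package main

    import (
        "fmt"
        "os"
        "syscall"

        "github.com/mitchellh/panicwrap"
    )

    func main() {
        cfg := &panicwrap.WrapConfig{
            Handler: func(output string) {
                fmt.Fprintf(os.Stderr, "panic in child: %s\n", output)
            },
            // New fields from the hunks above: keep ignoring SIGINT in the
            // parent, but explicitly forward SIGTERM to the child.
            IgnoreSignals:  []os.Signal{os.Interrupt},
            ForwardSignals: []os.Signal{syscall.SIGTERM},
        }

        exitStatus, err := panicwrap.Wrap(cfg)
        if err != nil {
            fmt.Fprintf(os.Stderr, "couldn't wrap: %s\n", err)
            os.Exit(1)
        }
        if exitStatus >= 0 {
            // Parent process: the child already ran (and possibly panicked).
            os.Exit(exitStatus)
        }

        // Child process: real program logic goes here.
        fmt.Println("running as the wrapped child")
    }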
@@ -546,9 +546,10 @@
 			"revision": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
 		},
 		{
-			"checksumSHA1": "nd3S1qkFv7zZxA9be0bw4nT0pe0=",
+			"checksumSHA1": "EPwsEGG/9t4sCexmFYnlZpE548A=",
 			"path": "github.com/hashicorp/go-checkpoint",
-			"revision": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
+			"revision": "04fd58160a0619a814172a795aa173fa64be731c",
+			"revisionTime": "2017-06-17T00:44:57Z"
 		},
 		{
 			"checksumSHA1": "fSe5y1UgTDeYlnFfUcDA1zzcw+U=",
@@ -718,9 +719,10 @@
 			"revisionTime": "2017-03-16T18:53:39Z"
 		},
 		{
-			"checksumSHA1": "VBo7ciCNRr7wNVFmBTW8sm4PQ14=",
+			"checksumSHA1": "m2L8ohfZiFRsMW3iynaH/TWgnSY=",
 			"path": "github.com/mitchellh/panicwrap",
-			"revision": "a1e50bc201f387747a45ffff020f1af2d8759e88"
+			"revision": "fce601fe55579125e1b3cb0b992287e7290f7b83",
+			"revisionTime": "2017-01-06T18:23:40Z"
 		},
 		{
 			"checksumSHA1": "h+ODp7a8Vj8XMUsORLbhtQMWOO4=",