// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package view

import (
	"sort"
	"time"

	"go.opencensus.io/internal/tagencoding"
	"go.opencensus.io/tag"
)

type collector struct {
	// signatures maps each unique tag signature (the encoded values for
	// all keys) to the aggregator that accumulates data for that signature.
	signatures map[string]AggregationData
	// a describes the aggregation to perform for this view.
	a *Aggregation
}
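
// addSample looks up (or lazily creates) the aggregator for the tag
// signature s and folds the value v, observed at time t with the given
// attachments, into it.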
func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
	aggregator, ok := c.signatures[s]
	if !ok {
		aggregator = c.a.newData()
		c.signatures[s] = aggregator
	}
	aggregator.addSample(v, attachments, t)
}

// collectedRows returns a snapshot of the collected Row values, one Row per
// unique tag signature.
func (c *collector) collectedRows(keys []tag.Key) []*Row {
	rows := make([]*Row, 0, len(c.signatures))
	for sig, aggregator := range c.signatures {
		tags := decodeTags([]byte(sig), keys)
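		// Clone the aggregation data so the snapshot cannot be mutated by
		// later samples.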
		row := &Row{Tags: tags, Data: aggregator.clone()}
		rows = append(rows, row)
	}
	return rows
}
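
// clearRows drops all collected data, resetting the collector to its
// initial, empty state.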
func (c *collector) clearRows() {
	c.signatures = make(map[string]AggregationData)
}

// encodeWithKeys encodes the tag map into a signature by writing the value
// for each provided key, in the order the keys are given; keys absent from
// the map are written as empty values.
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
	vb := &tagencoding.Values{
		Buffer: make([]byte, len(keys)),
	}
	for _, k := range keys {
		v, _ := m.Value(k)
		vb.WriteValue([]byte(v))
	}
	return vb.Bytes()
}

// decodeTags decodes a signature produced by encodeWithKeys back into tags
// for the given keys, skipping keys whose encoded value is empty, and
// returns the tags sorted by key name.
func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
	vb := &tagencoding.Values{Buffer: buf}
	var tags []tag.Tag
	for _, k := range keys {
		v := vb.ReadValue()
		if v != nil {
			tags = append(tags, tag.Tag{Key: k, Value: string(v)})
		}
	}
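	// Reset the read cursor so the buffer can be decoded again later.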
	vb.ReadIndex = 0
	sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
	return tags
}