2013-06-12 20:41:44 -04:00
|
|
|
|
package common
|
|
|
|
|
|
|
|
|
|
import (
	"bufio"
	"bytes"
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/jlaffaye/ftp"
)
|
|
|
|
|
|
|
|
|
|
|
2013-06-12 20:41:44 -04:00
|
|
|
|
// DownloadConfig is the configuration given to instantiate a new
// download instance. Once a configuration is used to instantiate
// a download client, it must not be modified.
type DownloadConfig struct {
	// The source URL in the form of a string.
	Url string

	// This is the path to download the file to.
	TargetPath string

	// DownloaderMap maps a schema (URL scheme such as "http" or "file")
	// to a Downloader implementation. When nil, NewDownloadClient fills
	// in a default set.
	DownloaderMap map[string]Downloader

	// If true, this will copy even a local file to the target
	// location. If false, then it will "download" the file by just
	// returning the local path to the file.
	CopyFile bool

	// The hashing implementation to use to checksum the downloaded file.
	Hash hash.Hash

	// The checksum for the downloaded file. The hash implementation configuration
	// for the downloader will be used to verify with this checksum after
	// it is downloaded.
	Checksum []byte

	// What to use for the user agent for HTTP requests. If set to "", use the
	// default user agent provided by Go.
	UserAgent string
}
|
|
|
|
|
|
|
|
|
|
// A DownloadClient helps download, verify checksums, etc.
type DownloadClient struct {
	// Immutable configuration supplied to NewDownloadClient.
	config *DownloadConfig

	// The downloader selected by Get based on the URL scheme; nil until
	// a download has started.
	downloader Downloader
}
|
|
|
|
|
|
2013-07-14 02:50:34 -04:00
|
|
|
|
// HashForType returns the Hash implementation for the given string
// type, or nil if the type is not supported.
func HashForType(t string) hash.Hash {
	// Table of supported checksum algorithms keyed by their lowercase name.
	constructors := map[string]func() hash.Hash{
		"md5":    md5.New,
		"sha1":   sha1.New,
		"sha256": sha256.New,
		"sha512": sha512.New,
	}

	newHash, ok := constructors[t]
	if !ok {
		return nil
	}
	return newHash()
}
|
|
|
|
|
|
2013-06-12 20:41:44 -04:00
|
|
|
|
// NewDownloadClient returns a new DownloadClient for the given
|
|
|
|
|
// configuration.
|
|
|
|
|
func NewDownloadClient(c *DownloadConfig) *DownloadClient {
|
2016-04-05 14:11:30 -04:00
|
|
|
|
const mtu = 1500 /* ethernet */ - 20 /* ipv4 */ - 20 /* tcp */
|
|
|
|
|
|
2013-06-12 20:41:44 -04:00
|
|
|
|
if c.DownloaderMap == nil {
|
|
|
|
|
c.DownloaderMap = map[string]Downloader{
|
2016-04-05 14:11:30 -04:00
|
|
|
|
"file": &FileDownloader{bufferSize: nil},
|
|
|
|
|
"ftp": &FTPDownloader{userInfo: url.Userinfo{username:"anonymous", password: "anonymous@"}, mtu: mtu},
|
2014-01-09 11:41:34 -05:00
|
|
|
|
"http": &HTTPDownloader{userAgent: c.UserAgent},
|
|
|
|
|
"https": &HTTPDownloader{userAgent: c.UserAgent},
|
2016-04-05 14:11:30 -04:00
|
|
|
|
"smb": &SMBDownloader{bufferSize: nil}
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return &DownloadClient{config: c}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// A downloader is responsible for actually taking a remote URL and
// downloading it.
type Downloader interface {
	// Cancel requests that an in-flight download stop.
	Cancel()
	// Download copies the contents of the given URL into the given file.
	Download(*os.File, *url.URL) error
	// Progress returns the number of bytes downloaded so far.
	Progress() uint
	// Total returns the expected total size of the download in bytes.
	Total() uint
}
|
|
|
|
|
|
|
|
|
|
// Cancel cancels an in-progress download. Currently a no-op.
func (d *DownloadClient) Cancel() {
	// TODO(mitchellh): Implement
}
|
|
|
|
|
|
|
|
|
|
func (d *DownloadClient) Get() (string, error) {
|
|
|
|
|
// If we already have the file and it matches, then just return the target path.
|
|
|
|
|
if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify {
|
2015-08-19 16:15:23 -04:00
|
|
|
|
log.Println("[DEBUG] Initial checksum matched, no download needed.")
|
2013-06-12 20:41:44 -04:00
|
|
|
|
return d.config.TargetPath, nil
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-03 05:36:29 -04:00
|
|
|
|
u, err := url.Parse(d.config.Url)
|
2013-06-12 20:41:44 -04:00
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-03 05:36:29 -04:00
|
|
|
|
log.Printf("Parsed URL: %#v", u)
|
2013-06-28 22:34:43 -04:00
|
|
|
|
|
2016-04-05 14:11:30 -04:00
|
|
|
|
/* FIXME:
|
|
|
|
|
handle the special case of d.config.CopyFile which returns the path
|
|
|
|
|
in an os-specific format.
|
|
|
|
|
*/
|
|
|
|
|
|
2013-06-12 20:41:44 -04:00
|
|
|
|
// Files when we don't copy the file are special cased.
|
2015-06-22 15:14:35 -04:00
|
|
|
|
var f *os.File
|
2013-06-12 20:41:44 -04:00
|
|
|
|
var finalPath string
|
2015-08-14 20:49:08 -04:00
|
|
|
|
sourcePath := ""
|
2016-07-03 05:36:29 -04:00
|
|
|
|
if u.Scheme == "file" && !d.config.CopyFile {
|
|
|
|
|
// This is special case for relative path in this case user specify
|
|
|
|
|
// file:../ and after parse destination goes to Opaque
|
|
|
|
|
if u.Path != "" {
|
|
|
|
|
// If url.Path is set just use this
|
|
|
|
|
finalPath = u.Path
|
|
|
|
|
} else if u.Opaque != "" {
|
|
|
|
|
// otherwise try url.Opaque
|
|
|
|
|
finalPath = u.Opaque
|
|
|
|
|
}
|
2015-08-14 20:49:08 -04:00
|
|
|
|
// This is a special case where we use a source file that already exists
|
|
|
|
|
// locally and we don't make a copy. Normally we would copy or download.
|
2015-08-19 16:15:23 -04:00
|
|
|
|
log.Printf("[DEBUG] Using local file: %s", finalPath)
|
2013-08-03 16:34:48 -04:00
|
|
|
|
|
2016-01-18 18:36:47 -05:00
|
|
|
|
// transform the actual file uri to a windowsy path if we're being windowsy.
|
2015-11-01 22:46:14 -05:00
|
|
|
|
if runtime.GOOS == "windows" {
|
2016-01-18 18:36:47 -05:00
|
|
|
|
// FIXME: cwd should point to a path relative to the TEMPLATE path,
|
|
|
|
|
// but since this isn't exposed to us anywhere, we use os.Getwd()
|
|
|
|
|
// and assume the user ran packer in the same directory that
|
|
|
|
|
// any relative files are located at.
|
|
|
|
|
cwd,err := os.Getwd()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", fmt.Errorf("Unable to get working directory")
|
2015-11-01 22:46:14 -05:00
|
|
|
|
}
|
2016-01-18 18:36:47 -05:00
|
|
|
|
finalPath = NormalizeWindowsURL(cwd, *url)
|
2018-01-03 17:34:11 -05:00
|
|
|
|
}
|
|
|
|
|
|
2015-08-14 20:49:08 -04:00
|
|
|
|
// Keep track of the source so we can make sure not to delete this later
|
|
|
|
|
sourcePath = finalPath
|
2016-07-03 05:36:29 -04:00
|
|
|
|
if _, err = os.Stat(finalPath); err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
2013-06-12 20:41:44 -04:00
|
|
|
|
} else {
|
2013-06-23 18:58:47 -04:00
|
|
|
|
finalPath = d.config.TargetPath
|
|
|
|
|
|
2013-06-12 20:41:44 -04:00
|
|
|
|
var ok bool
|
2016-07-03 05:36:29 -04:00
|
|
|
|
d.downloader, ok = d.config.DownloaderMap[u.Scheme]
|
2013-06-12 20:41:44 -04:00
|
|
|
|
if !ok {
|
2016-07-03 05:36:29 -04:00
|
|
|
|
return "", fmt.Errorf("No downloader for scheme: %s", u.Scheme)
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Otherwise, download using the downloader.
|
2015-06-15 02:43:25 -04:00
|
|
|
|
f, err = os.OpenFile(finalPath, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
|
2013-06-12 20:41:44 -04:00
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-03 05:36:29 -04:00
|
|
|
|
log.Printf("[DEBUG] Downloading: %s", u.String())
|
|
|
|
|
err = d.downloader.Download(f, u)
|
2015-06-22 15:17:29 -04:00
|
|
|
|
f.Close()
|
2013-07-07 15:16:31 -04:00
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if d.config.Hash != nil {
|
|
|
|
|
var verify bool
|
|
|
|
|
verify, err = d.VerifyChecksum(finalPath)
|
|
|
|
|
if err == nil && !verify {
|
2015-08-14 20:49:08 -04:00
|
|
|
|
// Only delete the file if we made a copy or downloaded it
|
|
|
|
|
if sourcePath != finalPath {
|
|
|
|
|
os.Remove(finalPath)
|
|
|
|
|
}
|
2015-06-22 15:17:29 -04:00
|
|
|
|
|
|
|
|
|
err = fmt.Errorf(
|
|
|
|
|
"checksums didn't match expected: %s",
|
|
|
|
|
hex.EncodeToString(d.config.Checksum))
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return finalPath, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// PercentProgress returns the download progress as a percentage.
|
2013-08-14 11:15:25 -04:00
|
|
|
|
func (d *DownloadClient) PercentProgress() int {
|
2013-06-28 22:34:43 -04:00
|
|
|
|
if d.downloader == nil {
|
2013-08-14 11:15:25 -04:00
|
|
|
|
return -1
|
2013-06-28 22:34:43 -04:00
|
|
|
|
}
|
|
|
|
|
|
2013-08-14 11:15:25 -04:00
|
|
|
|
return int((float64(d.downloader.Progress()) / float64(d.downloader.Total())) * 100)
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// VerifyChecksum tests that the path matches the checksum for the
|
|
|
|
|
// download.
|
|
|
|
|
func (d *DownloadClient) VerifyChecksum(path string) (bool, error) {
|
|
|
|
|
if d.config.Checksum == nil || d.config.Hash == nil {
|
|
|
|
|
return false, errors.New("Checksum or Hash isn't set on download.")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
f, err := os.Open(path)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return false, err
|
|
|
|
|
}
|
|
|
|
|
defer f.Close()
|
|
|
|
|
|
2013-06-28 22:34:43 -04:00
|
|
|
|
log.Printf("Verifying checksum of %s", path)
|
2013-06-12 20:41:44 -04:00
|
|
|
|
d.config.Hash.Reset()
|
|
|
|
|
io.Copy(d.config.Hash, f)
|
2017-03-28 21:29:55 -04:00
|
|
|
|
return bytes.Equal(d.config.Hash.Sum(nil), d.config.Checksum), nil
|
2013-06-12 20:41:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// HTTPDownloader is an implementation of Downloader that downloads
// files over HTTP.
type HTTPDownloader struct {
	// Bytes received so far (includes any pre-existing partial file when
	// the server supports range requests).
	progress uint
	// Expected total size: the resume offset plus the Content-Length.
	total uint
	// Optional User-Agent header value; "" means Go's default.
	userAgent string
}
|
|
|
|
|
|
|
|
|
|
// Cancel cancels an in-progress HTTP download. Currently a no-op.
func (*HTTPDownloader) Cancel() {
	// TODO(mitchellh): Implement
}
|
|
|
|
|
|
2015-06-15 02:43:25 -04:00
|
|
|
|
func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error {
|
2016-04-05 14:11:30 -04:00
|
|
|
|
log.Printf("Starting download over HTTP: %s", src.String())
|
2015-06-22 15:14:35 -04:00
|
|
|
|
|
|
|
|
|
// Seek to the beginning by default
|
|
|
|
|
if _, err := dst.Seek(0, 0); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-22 17:59:38 -04:00
|
|
|
|
// Reset our progress
|
|
|
|
|
d.progress = 0
|
|
|
|
|
|
2015-06-22 15:14:35 -04:00
|
|
|
|
// Make the request. We first make a HEAD request so we can check
|
|
|
|
|
// if the server supports range queries. If the server/URL doesn't
|
|
|
|
|
// support HEAD requests, we just fall back to GET.
|
2015-06-15 02:43:25 -04:00
|
|
|
|
req, err := http.NewRequest("HEAD", src.String(), nil)
|
2013-08-18 14:34:36 -04:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-09 11:41:34 -05:00
|
|
|
|
if d.userAgent != "" {
|
|
|
|
|
req.Header.Set("User-Agent", d.userAgent)
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-18 14:34:36 -04:00
|
|
|
|
httpClient := &http.Client{
|
|
|
|
|
Transport: &http.Transport{
|
|
|
|
|
Proxy: http.ProxyFromEnvironment,
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
resp, err := httpClient.Do(req)
|
2015-06-22 15:14:35 -04:00
|
|
|
|
if err == nil && (resp.StatusCode >= 200 && resp.StatusCode < 300) {
|
|
|
|
|
// If the HEAD request succeeded, then attempt to set the range
|
|
|
|
|
// query if we can.
|
|
|
|
|
if resp.Header.Get("Accept-Ranges") == "bytes" {
|
|
|
|
|
if fi, err := dst.Stat(); err == nil {
|
|
|
|
|
if _, err = dst.Seek(0, os.SEEK_END); err == nil {
|
|
|
|
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size()))
|
|
|
|
|
d.progress = uint(fi.Size())
|
|
|
|
|
}
|
2015-06-15 18:04:48 -04:00
|
|
|
|
}
|
2015-06-15 02:43:25 -04:00
|
|
|
|
}
|
2013-07-07 15:16:31 -04:00
|
|
|
|
}
|
|
|
|
|
|
2015-06-22 15:14:35 -04:00
|
|
|
|
// Set the request to GET now, and redo the query to download
|
2015-06-15 18:04:48 -04:00
|
|
|
|
req.Method = "GET"
|
2015-06-15 02:43:25 -04:00
|
|
|
|
|
|
|
|
|
resp, err = httpClient.Do(req)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-22 17:58:27 -04:00
|
|
|
|
d.total = d.progress + uint(resp.ContentLength)
|
2013-06-12 20:41:44 -04:00
|
|
|
|
var buffer [4096]byte
|
|
|
|
|
for {
|
|
|
|
|
n, err := resp.Body.Read(buffer[:])
|
|
|
|
|
if err != nil && err != io.EOF {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
d.progress += uint(n)
|
|
|
|
|
|
|
|
|
|
if _, werr := dst.Write(buffer[:n]); werr != nil {
|
|
|
|
|
return werr
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err == io.EOF {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Progress returns the number of bytes downloaded so far.
func (d *HTTPDownloader) Progress() uint {
	return d.progress
}
|
|
|
|
|
|
|
|
|
|
// Total returns the expected total size of the download in bytes.
func (d *HTTPDownloader) Total() uint {
	return d.total
}
|
2016-04-05 14:11:30 -04:00
|
|
|
|
|
|
|
|
|
// FTPDownloader is an implementation of Downloader that downloads
// files over FTP.
type FTPDownloader struct {
	// Default credentials used when the source URL carries none.
	// BUG FIX: the type is *url.Userinfo ("url.UserInfo" does not
	// exist), and a pointer is required since Download may substitute
	// the URL's own uri.User, which is a *url.Userinfo.
	userInfo *url.Userinfo
	// Transfer chunk size in bytes per copy iteration.
	mtu uint

	// True while a transfer is in flight; cleared by Cancel to stop it.
	active bool
	// Bytes transferred so far.
	progress uint
	// Size of the remote file in bytes.
	total uint
}
|
|
|
|
|
|
|
|
|
|
func (*FTPDownloader) Cancel() {
|
|
|
|
|
d.active = false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (d *FTPDownloader) Download(dst *os.File, src *url.URL) error {
|
|
|
|
|
var userinfo *url.Userinfo
|
|
|
|
|
|
|
|
|
|
userinfo = d.userInfo
|
|
|
|
|
d.active = false
|
|
|
|
|
|
|
|
|
|
// check the uri is correct
|
|
|
|
|
uri, err := url.Parse(src)
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
|
|
|
|
|
if uri.Scheme != "ftp" {
|
|
|
|
|
return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// connect to ftp server
|
|
|
|
|
var cli *ftp.ServerConn
|
|
|
|
|
|
|
|
|
|
log.Printf("Starting download over FTP: %s : %s\n", uri.Host, Uri.Path)
|
|
|
|
|
cli,err := ftp.Dial(uri.Host)
|
|
|
|
|
if err != nil { return nil }
|
|
|
|
|
defer cli.Close()
|
|
|
|
|
|
|
|
|
|
// handle authentication
|
|
|
|
|
if uri.User != nil { userinfo = uri.User }
|
|
|
|
|
|
|
|
|
|
log.Printf("Authenticating to FTP server: %s : %s\n", uri.User.username, uri.User.password)
|
|
|
|
|
err = cli.Login(userinfo.username, userinfo.password)
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
|
|
|
|
|
// locate specified path
|
|
|
|
|
path := path.Dir(uri.Path)
|
|
|
|
|
|
|
|
|
|
log.Printf("Changing to FTP directory : %s\n", path)
|
|
|
|
|
err = cli.ChangeDir(path)
|
|
|
|
|
if err != nil { return nil }
|
|
|
|
|
|
|
|
|
|
curpath,err := cli.CurrentDir()
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
log.Printf("Current FTP directory : %s\n", curpath)
|
|
|
|
|
|
|
|
|
|
// collect stats about the specified file
|
|
|
|
|
var name string
|
|
|
|
|
var entry *ftp.Entry
|
|
|
|
|
|
|
|
|
|
_,name = path.Split(uri.Path)
|
|
|
|
|
entry = nil
|
|
|
|
|
|
|
|
|
|
entries,err := cli.List(curpath)
|
|
|
|
|
for _,e := range entries {
|
|
|
|
|
if e.Type == ftp.EntryTypeFile && e.Name == name {
|
|
|
|
|
entry = e
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if entry == nil {
|
|
|
|
|
return fmt.Errorf("Unable to find file: %s", uri.Path)
|
|
|
|
|
}
|
|
|
|
|
log.Printf("Found file : %s : %v bytes\n", entry.Name, entry.Size)
|
|
|
|
|
|
|
|
|
|
d.progress = 0
|
|
|
|
|
d.total = entry.Size
|
|
|
|
|
|
|
|
|
|
// download specified file
|
|
|
|
|
d.active = true
|
|
|
|
|
reader,err := cli.RetrFrom(uri.Path, d.progress)
|
|
|
|
|
if err != nil { return nil }
|
|
|
|
|
|
|
|
|
|
// do it in a goro so that if someone wants to cancel it, they can
|
|
|
|
|
errch := make(chan error)
|
|
|
|
|
go func(d *FTPDownloader, r *io.Reader, w *bufio.Writer, e chan error) {
|
|
|
|
|
defer w.Flush()
|
|
|
|
|
for ; d.active {
|
|
|
|
|
n,err := io.CopyN(writer, reader, d.mtu)
|
|
|
|
|
if err != nil { break }
|
|
|
|
|
d.progress += n
|
|
|
|
|
}
|
|
|
|
|
d.active = false
|
|
|
|
|
e <- err
|
|
|
|
|
}(d, reader, bufio.NewWriter(dst), errch)
|
|
|
|
|
|
|
|
|
|
// spin until it's done
|
|
|
|
|
err = <-errch
|
|
|
|
|
reader.Close()
|
|
|
|
|
|
|
|
|
|
if err == nil && d.progress != d.total {
|
|
|
|
|
err = fmt.Errorf("FTP total transfer size was %d when %d was expected", d.progress, d.total)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// log out and quit
|
|
|
|
|
cli.Logout()
|
|
|
|
|
cli.Quit()
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Progress returns the number of bytes transferred so far.
func (d *FTPDownloader) Progress() uint {
	return d.progress
}
|
|
|
|
|
|
|
|
|
|
// Total returns the size of the remote file in bytes.
func (d *FTPDownloader) Total() uint {
	return d.total
}
|
|
|
|
|
|
|
|
|
|
// FileDownloader is an implementation of Downloader that downloads
// files using the regular filesystem.
type FileDownloader struct {
	// Optional copy chunk size in bytes; nil means copy synchronously
	// in a single io.Copy.
	bufferSize *uint

	// True while a copy is in flight; cleared by Cancel to stop it.
	active bool
	// Bytes copied so far.
	progress uint
	// Size of the source file in bytes.
	total uint
}
|
|
|
|
|
|
|
|
|
|
func (*FileDownloader) Cancel() {
|
|
|
|
|
d.active = false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Progress returns the number of bytes copied so far.
func (d *FileDownloader) Progress() uint {
	return d.progress
}
|
|
|
|
|
|
|
|
|
|
func (d *FileDownloader) Download(dst *os.File, src *url.URL) error {
|
|
|
|
|
d.active = false
|
|
|
|
|
|
|
|
|
|
/* parse the uri using the net/url module */
|
|
|
|
|
uri, err := url.Parse(src)
|
|
|
|
|
if uri.Scheme != "file" {
|
|
|
|
|
return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* use the current working directory as the base for relative uri's */
|
|
|
|
|
cwd,err := os.Getwd()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", fmt.Errorf("Unable to get working directory")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* determine which uri format is being used and convert to a real path */
|
|
|
|
|
var realpath string, basepath string
|
|
|
|
|
basepath = filepath.ToSlash(cwd)
|
|
|
|
|
|
|
|
|
|
// absolute path -- file://c:/absolute/path
|
|
|
|
|
if strings.HasSuffix(uri.Host, ":") {
|
|
|
|
|
realpath = path.Join(uri.Host, uri.Path)
|
|
|
|
|
|
|
|
|
|
// semi-absolute path (current drive letter) -- file:///absolute/path
|
|
|
|
|
} else if uri.Host == "" && strings.HasPrefix(uri.Path, "/") {
|
|
|
|
|
realpath = path.Join(filepath.VolumeName(basepath), uri.Path)
|
|
|
|
|
|
|
|
|
|
// relative path -- file://./relative/path
|
|
|
|
|
} else if uri.Host == "." {
|
|
|
|
|
realpath = path.Join(basepath, uri.Path)
|
|
|
|
|
|
|
|
|
|
// relative path -- file://relative/path
|
|
|
|
|
} else {
|
|
|
|
|
realpath = path.Join(basepath, uri.Host, uri.Path)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* download the file using the operating system's facilities */
|
|
|
|
|
d.progress = 0
|
|
|
|
|
d.active = true
|
|
|
|
|
|
|
|
|
|
f, err = os.Open(realpath)
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
|
|
// get the file size
|
|
|
|
|
fi, err := f.Stat()
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
d.total = fi.Size()
|
|
|
|
|
|
|
|
|
|
// no bufferSize specified, so copy synchronously.
|
|
|
|
|
if d.bufferSize == nil {
|
|
|
|
|
n,err := io.Copy(dst, f)
|
|
|
|
|
d.active = false
|
|
|
|
|
d.progress += n
|
|
|
|
|
|
|
|
|
|
// use a goro in case someone else wants to enable cancel/resume
|
|
|
|
|
} else {
|
|
|
|
|
errch := make(chan error)
|
|
|
|
|
go func(d* FileDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) {
|
|
|
|
|
defer w.Flush()
|
|
|
|
|
for ; d.active {
|
|
|
|
|
n,err := io.CopyN(writer, reader, d.bufferSize)
|
|
|
|
|
if err != nil { break }
|
|
|
|
|
d.progress += n
|
|
|
|
|
}
|
|
|
|
|
d.active = false
|
|
|
|
|
e <- err
|
|
|
|
|
}(d, f, bufio.NewWriter(dst), errch)
|
|
|
|
|
|
|
|
|
|
// ...and we spin until it's done
|
|
|
|
|
err = <-errch
|
|
|
|
|
}
|
|
|
|
|
f.Close()
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Total returns the size of the source file in bytes.
func (d *FileDownloader) Total() uint {
	return d.total
}
|
|
|
|
|
|
|
|
|
|
// SMBDownloader is an implementation of Downloader that downloads
// files using the "\\" path format on Windows
type SMBDownloader struct {
	// Optional copy chunk size in bytes; nil means copy synchronously
	// in a single io.Copy.
	bufferSize *uint

	// True while a copy is in flight; cleared by Cancel to stop it.
	active bool
	// Bytes copied so far.
	progress uint
	// Size of the source file in bytes.
	total uint
}
|
|
|
|
|
|
|
|
|
|
func (*SMBDownloader) Cancel() {
|
|
|
|
|
d.active = false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Progress returns the number of bytes copied so far.
func (d *SMBDownloader) Progress() uint {
	return d.progress
}
|
|
|
|
|
|
|
|
|
|
func (d *SMBDownloader) Download(dst *os.File, src *url.URL) error {
|
|
|
|
|
const UNCPrefix = string(os.PathSeparator)+string(os.PathSeparator)
|
|
|
|
|
d.active = false
|
|
|
|
|
|
|
|
|
|
if runtime.GOOS != "windows" {
|
|
|
|
|
return fmt.Errorf("Support for SMB based uri's are not supported on %s", runtime.GOOS)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* convert the uri using the net/url module to a UNC path */
|
|
|
|
|
var realpath string
|
|
|
|
|
uri, err := url.Parse(src)
|
|
|
|
|
if uri.Scheme != "smb" {
|
|
|
|
|
return fmt.Errorf("Unexpected uri scheme: %s", uri.Scheme)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
realpath = UNCPrefix + filepath.ToSlash(path.Join(uri.Host, uri.Path))
|
|
|
|
|
|
|
|
|
|
/* Open up the "\\"-prefixed path using the Windows filesystem */
|
|
|
|
|
d.progress = 0
|
|
|
|
|
d.active = true
|
|
|
|
|
|
|
|
|
|
f, err = os.Open(realpath)
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
|
|
// get the file size (at the risk of performance)
|
|
|
|
|
fi, err := f.Stat()
|
|
|
|
|
if err != nil { return err }
|
|
|
|
|
d.total = fi.Size()
|
|
|
|
|
|
|
|
|
|
// no bufferSize specified, so copy synchronously.
|
|
|
|
|
if d.bufferSize == nil {
|
|
|
|
|
n,err := io.Copy(dst, f)
|
|
|
|
|
d.active = false
|
|
|
|
|
d.progress += n
|
|
|
|
|
|
|
|
|
|
// use a goro in case someone else wants to enable cancel/resume
|
|
|
|
|
} else {
|
|
|
|
|
errch := make(chan error)
|
|
|
|
|
go func(d* SMBDownloader, r *bufio.Reader, w *bufio.Writer, e chan error) {
|
|
|
|
|
defer w.Flush()
|
|
|
|
|
for ; d.active {
|
|
|
|
|
n,err := io.CopyN(writer, reader, d.bufferSize)
|
|
|
|
|
if err != nil { break }
|
|
|
|
|
d.progress += n
|
|
|
|
|
}
|
|
|
|
|
d.active = false
|
|
|
|
|
e <- err
|
|
|
|
|
}(d, f, bufio.NewWriter(dst), errch)
|
|
|
|
|
|
|
|
|
|
// ...and as usual we spin until it's done
|
|
|
|
|
err = <-errch
|
|
|
|
|
}
|
|
|
|
|
f.Close()
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Total returns the size of the source file in bytes.
func (d *SMBDownloader) Total() uint {
	return d.total
}
|