Compare commits

...

2 Commits

Author       SHA1        Message          Date
Megan Marsh  371667c3a7  vendor           2021-04-12 15:38:36 -07:00
Megan Marsh  f570852816  extract Vagrant  2021-04-12 15:38:23 -07:00
78 changed files with 461 additions and 2215 deletions

View File

@@ -1,72 +0,0 @@
package vagrant
import (
"runtime"
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestArtifact_Impl(t *testing.T) {
var raw interface{} = &artifact{}
if _, ok := raw.(packersdk.Artifact); !ok {
t.Fatalf("Artifact does not implement packersdk.Artifact")
}
}
func TestArtifactId(t *testing.T) {
a := &artifact{
OutputDir: "/my/dir",
BoxName: "package.box",
Provider: "virtualbox",
}
expected := "virtualbox"
if a.Id() != expected {
t.Fatalf("artifact ID should match: expected: %s received: %s", expected, a.Id())
}
}
func TestArtifactString(t *testing.T) {
a := &artifact{
OutputDir: "/my/dir",
BoxName: "package.box",
Provider: "virtualbox",
}
expected := "Vagrant box 'package.box' for 'virtualbox' provider"
if runtime.GOOS == "windows" {
expected = strings.Replace(expected, "/", "\\", -1)
}
if strings.Compare(a.String(), expected) != 0 {
t.Fatalf("artifact string should match: expected: %s received: %s", expected, a.String())
}
}
func TestArtifactState(t *testing.T) {
expectedData := "this is the data"
a := &artifact{
StateData: map[string]interface{}{"state_data": expectedData},
}
// Valid state
result := a.State("state_data")
if result != expectedData {
t.Fatalf("Bad: State data was %s instead of %s", result, expectedData)
}
// Invalid state
result = a.State("invalid_key")
if result != nil {
t.Fatalf("Bad: State should be nil for invalid state data name")
}
// Nil StateData should not fail and should return nil
a = &artifact{}
result = a.State("key")
if result != nil {
t.Fatalf("Bad: State should be nil for nil StateData")
}
}
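For orientation, here is a minimal sketch of the artifact behavior these cases exercise. The real type lives in packer-plugin-vagrant, so the reduced struct below is an assumption limited to the fields the tests touch.

```go
package vagrant

// Hypothetical reduction of the builder's artifact type, inferred from the
// tests above: State returns the stored value for a key and tolerates a nil
// StateData map by returning nil.
type artifact struct {
	OutputDir string
	BoxName   string
	Provider  string
	StateData map[string]interface{}
}

func (a *artifact) State(name string) interface{} {
	if a.StateData == nil {
		return nil
	}
	return a.StateData[name]
}
```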

View File

@@ -1,130 +0,0 @@
package vagrant
import (
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestBuilder_ImplementsBuilder(t *testing.T) {
var raw interface{}
raw = &Builder{}
if _, ok := raw.(packersdk.Builder); !ok {
t.Fatalf("Builder should be a builder")
}
}
func TestBuilder_Prepare_ValidateSource(t *testing.T) {
type testCase struct {
config map[string]interface{}
errExpected bool
reason string
}
cases := []testCase{
{
config: map[string]interface{}{
"global_id": "a3559ec",
},
errExpected: true,
reason: "Need to set SSH communicator.",
},
{
config: map[string]interface{}{
"global_id": "a3559ec",
"communicator": "ssh",
},
errExpected: false,
reason: "Shouldn't fail because we've set global_id",
},
{
config: map[string]interface{}{
"communicator": "ssh",
},
errExpected: true,
reason: "Should fail because we must set source_path or global_id",
},
{
config: map[string]interface{}{
"source_path": "./mybox",
"communicator": "ssh",
},
errExpected: false,
reason: "Source path is set; we should be fine",
},
{
config: map[string]interface{}{
"source_path": "./mybox",
"communicator": "ssh",
"global_id": "a3559ec",
},
errExpected: true,
reason: "Both source path and global are set: we should error.",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"global_id": "a3559ec",
"teardown_method": "suspend",
},
errExpected: false,
reason: "Valid argument for teardown method",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"global_id": "a3559ec",
"teardown_method": "surspernd",
},
errExpected: true,
reason: "Inalid argument for teardown method",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"source_path": "./my.box",
},
errExpected: true,
reason: "Should fail because path does not exist",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"source_path": "file://my.box",
},
errExpected: true,
reason: "Should fail because path does not exist",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"source_path": "http://my.box",
},
errExpected: false,
reason: "Should pass because path is not local",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"source_path": "https://my.box",
},
errExpected: false,
reason: "Should pass because path is not local",
},
{
config: map[string]interface{}{
"communicator": "ssh",
"source_path": "smb://my.box",
},
errExpected: false,
reason: "Should pass because path is not local",
},
}
for _, tc := range cases {
_, _, err := (&Builder{}).Prepare(tc.config)
if (err != nil) != tc.errExpected {
t.Fatalf("Unexpected behavior from test case %#v; %s.", tc.config, tc.reason)
}
}
}

View File

@@ -1,62 +0,0 @@
package vagrant
import (
"strings"
"testing"
"github.com/hashicorp/packer-plugin-sdk/multistep"
)
func TestStepAdd_Impl(t *testing.T) {
var raw interface{}
raw = new(StepAddBox)
if _, ok := raw.(multistep.Step); !ok {
t.Fatalf("initialize should be a step")
}
}
func TestPrepAddArgs(t *testing.T) {
type testArgs struct {
Step StepAddBox
Expected []string
}
addTests := []testArgs{
{
Step: StepAddBox{
SourceBox: "my_source_box.box",
BoxName: "AWESOME BOX",
},
Expected: []string{"AWESOME BOX", "my_source_box.box"},
},
{
Step: StepAddBox{
SourceBox: "my_source_box",
BoxName: "AWESOME BOX",
},
Expected: []string{"my_source_box"},
},
{
Step: StepAddBox{
BoxVersion: "eleventyone",
CACert: "adfasdf",
CAPath: "adfasdf",
DownloadCert: "adfasdf",
Clean: true,
Force: true,
Insecure: true,
Provider: "virtualbox",
SourceBox: "bananabox.box",
BoxName: "bananas",
},
Expected: []string{"bananas", "bananabox.box", "--box-version", "eleventyone", "--cacert", "adfasdf", "--capath", "adfasdf", "--cert", "adfasdf", "--clean", "--force", "--insecure", "--provider", "virtualbox"},
},
}
for _, addTest := range addTests {
addArgs := addTest.Step.generateAddArgs()
for i, val := range addTest.Expected {
if strings.Compare(addArgs[i], val) != 0 {
t.Fatalf("expected %#v but received %#v", addTest.Expected, addArgs)
}
}
}
}
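The Expected slices above pin down how the add arguments are assembled. The following is a hedged reconstruction, not the plugin's actual implementation: the real generateAddArgs lives in packer-plugin-vagrant, and the reduced StepAddBox struct here is limited to the fields the test uses.

```go
package vagrant

import "strings"

// Hypothetical reduction of StepAddBox, inferred from the cases above.
type StepAddBox struct {
	SourceBox, BoxName, BoxVersion     string
	CACert, CAPath, DownloadCert       string
	Provider                           string
	Clean, Force, Insecure             bool
}

// generateAddArgs (sketch): a .box source is registered under BoxName, a bare
// box name is passed through as-is, and each optional field becomes the
// matching `vagrant box add` flag in the order the test expects.
func (s *StepAddBox) generateAddArgs() []string {
	addArgs := []string{}
	if strings.HasSuffix(s.SourceBox, ".box") {
		addArgs = append(addArgs, s.BoxName)
	}
	addArgs = append(addArgs, s.SourceBox)
	if s.BoxVersion != "" {
		addArgs = append(addArgs, "--box-version", s.BoxVersion)
	}
	if s.CACert != "" {
		addArgs = append(addArgs, "--cacert", s.CACert)
	}
	if s.CAPath != "" {
		addArgs = append(addArgs, "--capath", s.CAPath)
	}
	if s.DownloadCert != "" {
		addArgs = append(addArgs, "--cert", s.DownloadCert)
	}
	if s.Clean {
		addArgs = append(addArgs, "--clean")
	}
	if s.Force {
		addArgs = append(addArgs, "--force")
	}
	if s.Insecure {
		addArgs = append(addArgs, "--insecure")
	}
	if s.Provider != "" {
		addArgs = append(addArgs, "--provider", s.Provider)
	}
	return addArgs
}
```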

View File

@@ -1,115 +0,0 @@
package vagrant
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/hashicorp/packer-plugin-sdk/multistep"
)
func TestStepCreateVagrantfile_Impl(t *testing.T) {
var raw interface{}
raw = new(StepCreateVagrantfile)
if _, ok := raw.(multistep.Step); !ok {
t.Fatalf("initialize should be a step")
}
}
func TestCreateFile(t *testing.T) {
testy := StepCreateVagrantfile{
OutputDir: "./",
SourceBox: "apples",
BoxName: "bananas",
}
templatePath, err := testy.createVagrantfile()
if err != nil {
t.Fatalf(err.Error())
}
defer os.Remove(templatePath)
contents, err := ioutil.ReadFile(templatePath)
if err != nil {
t.Fatalf(err.Error())
}
actual := string(contents)
expected := `Vagrant.configure("2") do |config|
config.vm.define "source", autostart: false do |source|
source.vm.box = "apples"
config.ssh.insert_key = false
end
config.vm.define "output" do |output|
output.vm.box = "bananas"
output.vm.box_url = "file://package.box"
config.ssh.insert_key = false
end
config.vm.synced_folder ".", "/vagrant", disabled: true
end`
if ok := strings.Compare(actual, expected); ok != 0 {
t.Fatalf("EXPECTED: \n%s\n\n RECEIVED: \n%s\n\n", expected, actual)
}
}
func TestCreateFile_customSync(t *testing.T) {
testy := StepCreateVagrantfile{
OutputDir: "./",
SyncedFolder: "myfolder/foldertimes",
}
templatePath, err := testy.createVagrantfile()
if err != nil {
t.Fatalf(err.Error())
}
defer os.Remove(templatePath)
contents, err := ioutil.ReadFile(templatePath)
if err != nil {
t.Fatalf(err.Error())
}
actual := string(contents)
expected := `Vagrant.configure("2") do |config|
config.vm.define "source", autostart: false do |source|
source.vm.box = ""
config.ssh.insert_key = false
end
config.vm.define "output" do |output|
output.vm.box = ""
output.vm.box_url = "file://package.box"
config.ssh.insert_key = false
end
config.vm.synced_folder "myfolder/foldertimes", "/vagrant"
end`
if ok := strings.Compare(actual, expected); ok != 0 {
t.Fatalf("EXPECTED: \n%s\n\n RECEIVED: \n%s\n\n", expected, actual)
}
}
func TestCreateFile_InsertKeyTrue(t *testing.T) {
testy := StepCreateVagrantfile{
OutputDir: "./",
InsertKey: true,
}
templatePath, err := testy.createVagrantfile()
if err != nil {
t.Fatalf(err.Error())
}
defer os.Remove(templatePath)
contents, err := ioutil.ReadFile(templatePath)
if err != nil {
t.Fatalf(err.Error())
}
actual := string(contents)
expected := `Vagrant.configure("2") do |config|
config.vm.define "source", autostart: false do |source|
source.vm.box = ""
config.ssh.insert_key = true
end
config.vm.define "output" do |output|
output.vm.box = ""
output.vm.box_url = "file://package.box"
config.ssh.insert_key = true
end
config.vm.synced_folder ".", "/vagrant", disabled: true
end`
if ok := strings.Compare(actual, expected); ok != 0 {
t.Fatalf("EXPECTED: \n%s\n\n RECEIVED: \n%s\n\n", expected, actual)
}
}

View File

@@ -1,176 +0,0 @@
package vagrant
import (
"context"
"testing"
"github.com/hashicorp/packer-plugin-sdk/communicator"
"github.com/hashicorp/packer-plugin-sdk/multistep"
)
func TestStepSSHConfig_Impl(t *testing.T) {
var raw interface{}
raw = new(StepSSHConfig)
if _, ok := raw.(multistep.Step); !ok {
t.Fatalf("initialize should be a step")
}
}
func TestPrepStepSSHConfig_sshOverrides(t *testing.T) {
type testcase struct {
name string
inputSSHConfig communicator.SSH
expectedSSHConfig communicator.SSH
}
tcs := []testcase{
{
// defaults to overriding with the ssh config from vagrant
name: "default",
inputSSHConfig: communicator.SSH{},
expectedSSHConfig: communicator.SSH{
SSHHost: "127.0.0.1",
SSHPort: 2222,
SSHUsername: "vagrant",
SSHPassword: "",
},
},
{
// respects SSH host and port overrides independent of credential
// overrides
name: "host_override",
inputSSHConfig: communicator.SSH{
SSHHost: "123.45.67.8",
SSHPort: 1234,
},
expectedSSHConfig: communicator.SSH{
SSHHost: "123.45.67.8",
SSHPort: 1234,
SSHUsername: "vagrant",
SSHPassword: "",
},
},
{
// respects credential overrides
name: "credential_override",
inputSSHConfig: communicator.SSH{
SSHUsername: "megan",
SSHPassword: "SoSecure",
},
expectedSSHConfig: communicator.SSH{
SSHHost: "127.0.0.1",
SSHPort: 2222,
SSHUsername: "megan",
SSHPassword: "SoSecure",
},
},
}
for _, tc := range tcs {
driver := &MockVagrantDriver{}
config := &Config{
Comm: communicator.Config{
SSH: tc.inputSSHConfig,
},
}
state := new(multistep.BasicStateBag)
state.Put("driver", driver)
state.Put("config", config)
step := StepSSHConfig{}
_ = step.Run(context.Background(), state)
if config.Comm.SSHHost != tc.expectedSSHConfig.SSHHost {
t.Fatalf("unexpected sshconfig host: name: %s, received %s", tc.name, config.Comm.SSHHost)
}
if config.Comm.SSHPort != tc.expectedSSHConfig.SSHPort {
t.Fatalf("unexpected sshconfig port: name: %s, received %d", tc.name, config.Comm.SSHPort)
}
if config.Comm.SSHUsername != tc.expectedSSHConfig.SSHUsername {
t.Fatalf("unexpected sshconfig SSHUsername: name: %s, received %s", tc.name, config.Comm.SSHUsername)
}
if config.Comm.SSHPassword != tc.expectedSSHConfig.SSHPassword {
t.Fatalf("unexpected sshconfig SSHPassword: name: %s, received %s", tc.name, config.Comm.SSHPassword)
}
}
}
func TestPrepStepSSHConfig_GlobalID(t *testing.T) {
driver := &MockVagrantDriver{}
config := &Config{}
state := new(multistep.BasicStateBag)
state.Put("driver", driver)
state.Put("config", config)
step := StepSSHConfig{
GlobalID: "adsfadf",
}
_ = step.Run(context.Background(), state)
if driver.GlobalID != "adsfadf" {
t.Fatalf("Should have called SSHConfig with GlobalID asdfasdf")
}
}
func TestPrepStepSSHConfig_NoGlobalID(t *testing.T) {
driver := &MockVagrantDriver{}
config := &Config{}
state := new(multistep.BasicStateBag)
state.Put("driver", driver)
state.Put("config", config)
step := StepSSHConfig{}
_ = step.Run(context.Background(), state)
if driver.GlobalID != "source" {
t.Fatalf("Should have called SSHConfig with GlobalID source")
}
}
func TestPrepStepSSHConfig_SpacesInPath(t *testing.T) {
driver := &MockVagrantDriver{}
driver.ReturnSSHConfig = &VagrantSSHConfig{
Hostname: "127.0.0.1",
User: "vagrant",
Port: "2222",
UserKnownHostsFile: "/dev/null",
StrictHostKeyChecking: false,
PasswordAuthentication: false,
IdentityFile: "\"/path with spaces/insecure_private_key\"",
IdentitiesOnly: true,
LogLevel: "FATAL"}
config := &Config{}
state := new(multistep.BasicStateBag)
state.Put("driver", driver)
state.Put("config", config)
step := StepSSHConfig{}
_ = step.Run(context.Background(), state)
expected := "/path with spaces/insecure_private_key"
if config.Comm.SSHPrivateKeyFile != expected {
t.Fatalf("Bad config private key. Recieved: %s; expected: %s.", config.Comm.SSHPrivateKeyFile, expected)
}
}
func TestPrepStepSSHConfig_NoSpacesInPath(t *testing.T) {
driver := &MockVagrantDriver{}
driver.ReturnSSHConfig = &VagrantSSHConfig{
Hostname: "127.0.0.1",
User: "vagrant",
Port: "2222",
UserKnownHostsFile: "/dev/null",
StrictHostKeyChecking: false,
PasswordAuthentication: false,
IdentityFile: "/path/without/spaces/insecure_private_key",
IdentitiesOnly: true,
LogLevel: "FATAL"}
config := &Config{}
state := new(multistep.BasicStateBag)
state.Put("driver", driver)
state.Put("config", config)
step := StepSSHConfig{}
_ = step.Run(context.Background(), state)
expected := "/path/without/spaces/insecure_private_key"
if config.Comm.SSHPrivateKeyFile != expected {
t.Fatalf("Bad config private key. Recieved: %s; expected: %s.", config.Comm.SSHPrivateKeyFile, expected)
}
}
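The two private-key tests above hinge on one detail: `vagrant ssh-config` wraps an IdentityFile path that contains spaces in double quotes. A minimal sketch of the handling they imply follows; the helper name is an assumption, since the real step lives in packer-plugin-vagrant.

```go
package vagrant

import "strings"

// unquoteIdentityFile (hypothetical helper) strips the surrounding quotes
// Vagrant adds to paths with spaces, so SSHPrivateKeyFile always stores a
// plain path. Unquoted paths pass through unchanged.
func unquoteIdentityFile(identityFile string) string {
	return strings.Trim(identityFile, "\"")
}
```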

View File

@@ -1,40 +0,0 @@
package vagrant
import (
"strings"
"testing"
)
func TestPrepUpArgs(t *testing.T) {
type testArgs struct {
Step StepUp
Expected []string
}
tests := []testArgs{
{
Step: StepUp{
GlobalID: "foo",
Provider: "bar",
},
Expected: []string{"foo", "--provider=bar"},
},
{
Step: StepUp{},
Expected: []string{"source"},
},
{
Step: StepUp{
Provider: "pro",
},
Expected: []string{"source", "--provider=pro"},
},
}
for _, test := range tests {
args := test.Step.generateArgs()
for i, val := range test.Expected {
if strings.Compare(args[i], val) != 0 {
t.Fatalf("expected %#v but received %#v", test.Expected, args)
}
}
}
}
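As with the add-box step, the expected slices fully determine the argument assembly. A hedged reconstruction follows; the reduced StepUp struct and the function body are inferred from the cases above, not copied from the plugin.

```go
package vagrant

// Hypothetical reduction of StepUp, limited to the fields the test uses.
type StepUp struct {
	GlobalID string
	Provider string
}

// generateArgs (sketch): target the machine's global ID when set, otherwise
// the "source" machine defined in the generated Vagrantfile, and append a
// provider flag only when one was requested.
func (s *StepUp) generateArgs() []string {
	box := "source"
	if s.GlobalID != "" {
		box = s.GlobalID
	}
	args := []string{box}
	if s.Provider != "" {
		args = append(args, "--provider="+s.Provider)
	}
	return args
}
```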

View File

@@ -1,13 +0,0 @@
package version
import (
"github.com/hashicorp/packer-plugin-sdk/version"
packerVersion "github.com/hashicorp/packer/version"
)
var VagrantPluginVersion *version.PluginVersion
func init() {
VagrantPluginVersion = version.InitializePluginVersion(
packerVersion.Version, packerVersion.VersionPrerelease)
}

View File

@@ -50,7 +50,6 @@ import (
 tencentcloudcvmbuilder "github.com/hashicorp/packer/builder/tencentcloud/cvm"
 tritonbuilder "github.com/hashicorp/packer/builder/triton"
 uclouduhostbuilder "github.com/hashicorp/packer/builder/ucloud/uhost"
-vagrantbuilder "github.com/hashicorp/packer/builder/vagrant"
 virtualboxisobuilder "github.com/hashicorp/packer/builder/virtualbox/iso"
 virtualboxovfbuilder "github.com/hashicorp/packer/builder/virtualbox/ovf"
 virtualboxvmbuilder "github.com/hashicorp/packer/builder/virtualbox/vm"
@@ -69,8 +68,6 @@ import (
 manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest"
 shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local"
 ucloudimportpostprocessor "github.com/hashicorp/packer/post-processor/ucloud-import"
-vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant"
-vagrantcloudpostprocessor "github.com/hashicorp/packer/post-processor/vagrant-cloud"
 vspherepostprocessor "github.com/hashicorp/packer/post-processor/vsphere"
 vspheretemplatepostprocessor "github.com/hashicorp/packer/post-processor/vsphere-template"
 yandexexportpostprocessor "github.com/hashicorp/packer/post-processor/yandex-export"
@@ -137,7 +134,6 @@ var Builders = map[string]packersdk.Builder{
 "tencentcloud-cvm": new(tencentcloudcvmbuilder.Builder),
 "triton": new(tritonbuilder.Builder),
 "ucloud-uhost": new(uclouduhostbuilder.Builder),
-"vagrant": new(vagrantbuilder.Builder),
 "virtualbox-iso": new(virtualboxisobuilder.Builder),
 "virtualbox-ovf": new(virtualboxovfbuilder.Builder),
 "virtualbox-vm": new(virtualboxvmbuilder.Builder),
@@ -180,8 +176,6 @@ var PostProcessors = map[string]packersdk.PostProcessor{
 "manifest": new(manifestpostprocessor.PostProcessor),
 "shell-local": new(shelllocalpostprocessor.PostProcessor),
 "ucloud-import": new(ucloudimportpostprocessor.PostProcessor),
-"vagrant": new(vagrantpostprocessor.PostProcessor),
-"vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor),
 "vsphere": new(vspherepostprocessor.PostProcessor),
 "vsphere-template": new(vspheretemplatepostprocessor.PostProcessor),
 "yandex-export": new(yandexexportpostprocessor.PostProcessor),

View File

@@ -20,6 +20,9 @@ import (
 dockerpushpostprocessor "github.com/hashicorp/packer-plugin-docker/post-processor/docker-push"
 dockersavepostprocessor "github.com/hashicorp/packer-plugin-docker/post-processor/docker-save"
 dockertagpostprocessor "github.com/hashicorp/packer-plugin-docker/post-processor/docker-tag"
+vagrantbuilder "github.com/hashicorp/packer-plugin-vagrant/builder/vagrant"
+vagrantpostprocessor "github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant"
+vagrantcloudpostprocessor "github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant-cloud"
 )
 // VendoredDatasources are datasource components that were once bundled with the
@@ -38,6 +41,7 @@ var VendoredBuilders = map[string]packersdk.Builder{
 "amazon-ebssurrogate": new(amazonebssurrogatebuilder.Builder),
 "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder),
 "amazon-instance": new(amazoninstancebuilder.Builder),
+"vagrant": new(vagrantbuilder.Builder),
 }
 // VendoredProvisioners are provisioner components that were once bundled with the
@@ -53,6 +57,8 @@ var VendoredPostProcessors = map[string]packersdk.PostProcessor{
 "docker-tag": new(dockertagpostprocessor.PostProcessor),
 "exoscale-import": new(exoscaleimportpostprocessor.PostProcessor),
 "amazon-import": new(anazibimportpostprocessor.PostProcessor),
+"vagrant": new(vagrantpostprocessor.PostProcessor),
+"vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor),
 }
 // Upon init lets load up any plugins that were vendored manually into the default
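The registration pattern the hunks above follow is small enough to show in isolation: a vendored component is just an import from the external plugin module plus an entry in the corresponding map, keyed by the name users reference in their templates. Below is a trimmed sketch of only the Vagrant additions, with the package name assumed from the core command package this file belongs to; the surrounding file contains many more entries.

```go
package command

import (
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"

	vagrantbuilder "github.com/hashicorp/packer-plugin-vagrant/builder/vagrant"
	vagrantpostprocessor "github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant"
	vagrantcloudpostprocessor "github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant-cloud"
)

// Trimmed to the Vagrant entries: the builder and post-processors now come
// from packer-plugin-vagrant but stay addressable under their old names.
var VendoredBuilders = map[string]packersdk.Builder{
	"vagrant": new(vagrantbuilder.Builder),
}

var VendoredPostProcessors = map[string]packersdk.PostProcessor{
	"vagrant":       new(vagrantpostprocessor.PostProcessor),
	"vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor),
}
```

Keeping the old map keys means existing templates that reference "vagrant" or "vagrant-cloud" keep working while the implementation moves out of Packer core.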

go.mod (5 changed lines)
View File

@@ -51,7 +51,8 @@ require (
 github.com/hashicorp/hcl/v2 v2.9.1
 github.com/hashicorp/packer-plugin-amazon v0.0.1
 github.com/hashicorp/packer-plugin-docker v0.0.7
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407132324-af39c7839daf
+github.com/hashicorp/packer-plugin-sdk v0.1.3
+github.com/hashicorp/packer-plugin-vagrant v0.0.1
 github.com/hashicorp/vault/api v1.0.4
 github.com/hetznercloud/hcloud-go v1.15.1
 github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4
@@ -100,4 +101,6 @@ require (
 google.golang.org/grpc v1.32.0
 )
+replace github.com/hashicorp/packer-plugin-vagrant => /Users/mmarsh/Projects/packer-plugin-vagrant
 go 1.16

go.sum (13 changed lines)
View File

@@ -444,14 +444,8 @@ github.com/hashicorp/packer-plugin-sdk v0.0.14/go.mod h1:tNb3XzJPnjMl3QuUdKmF47B
 github.com/hashicorp/packer-plugin-sdk v0.1.0/go.mod h1:CFsC20uZjtER/EnTn/CSMKD0kEdkqOVev8mtOmfnZiI=
 github.com/hashicorp/packer-plugin-sdk v0.1.1/go.mod h1:1d3nqB9LUsXMQaNUiL67Q+WYEtjsVcLNTX8ikVlpBrc=
 github.com/hashicorp/packer-plugin-sdk v0.1.2/go.mod h1:KRjczE1/c9NV5Re+PXt3myJsVTI/FxEHpZjRjOH0Fug=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407090040-d1eff9fe99e8 h1:pkB+Y15/ck/NRUBFF9DrdPYQwmnHsEvnNwmgMfl/8hA=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407090040-d1eff9fe99e8/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407130359-85b84b1d6060 h1:uRrDQYiP3pFn5W17Bvj9If2taHB/DqIP7uuPQGnLDFM=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407130359-85b84b1d6060/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407130906-826d4f395a10 h1:VlcHJEpR99eeZi7uujdQKFOIK8rE5ditXGqpBWiGjc4=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407130906-826d4f395a10/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407132324-af39c7839daf h1:0DBlIExTDefzbfkOl213QtgJsVJXWdgW/aIQIvYUMzs=
-github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407132324-af39c7839daf/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
+github.com/hashicorp/packer-plugin-sdk v0.1.3 h1:oHTVlgoX2piUzL54+LBo9uIMfW+L/kY7or83dDStdIY=
+github.com/hashicorp/packer-plugin-sdk v0.1.3/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.2 h1:yJoyfZXo4Pk2p/M/viW+YLibBFiIbKoP79gu7kDAFP0=
 github.com/hashicorp/serf v0.9.2/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
@@ -496,8 +490,9 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.11.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
 github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20160114101742-999f3125931f/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=

View File

@@ -1,15 +0,0 @@
package vagrantcloud
import (
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestArtifact_ImplementsArtifact(t *testing.T) {
var raw interface{}
raw = &Artifact{}
if _, ok := raw.(packersdk.Artifact); !ok {
t.Fatalf("Artifact should be a Artifact")
}
}

View File

@@ -1,30 +0,0 @@
package vagrantcloud
import (
"encoding/json"
"strings"
"testing"
)
func TestVagrantCloudErrors(t *testing.T) {
testCases := []struct {
resp string
expected string
}{
{`{"Status":"422 Unprocessable Entity", "StatusCode":422, "errors":[]}`, ""},
{`{"Status":"404 Artifact not found", "StatusCode":404, "errors":["error1", "error2"]}`, "error1. error2"},
{`{"StatusCode":403, "errors":[{"message":"Bad credentials"}]}`, "message Bad credentials"},
{`{"StatusCode":500, "errors":[["error in unexpected format"]]}`, "[error in unexpected format]"},
}
for _, tc := range testCases {
var cloudErrors VagrantCloudErrors
err := json.NewDecoder(strings.NewReader(tc.resp)).Decode(&cloudErrors)
if err != nil {
t.Errorf("failed to decode error response: %s", err)
}
if got := cloudErrors.FormatErrors(); got != tc.expected {
t.Errorf("failed to get expected response; expected %q, got %q", tc.expected, got)
}
}
}

View File

@@ -1,850 +0,0 @@
package vagrantcloud
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"runtime"
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
"github.com/stretchr/testify/assert"
)
type stubResponse struct {
Path string
Method string
Response string
StatusCode int
}
type tarFiles []struct {
Name, Body string
}
func testGoodConfig() map[string]interface{} {
return map[string]interface{}{
"access_token": "foo",
"version_description": "bar",
"box_tag": "hashicorp/precise64",
"version": "0.5",
}
}
func testBadConfig() map[string]interface{} {
return map[string]interface{}{
"access_token": "foo",
"box_tag": "baz",
"version_description": "bar",
}
}
func testNoAccessTokenProvidedConfig() map[string]interface{} {
return map[string]interface{}{
"box_tag": "baz",
"version_description": "bar",
"version": "0.5",
}
}
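// newStackServer returns a test server that replays the given stubResponse
// stack in order, failing any request whose method or path does not match
// the next expected entry (or that arrives after the stack is exhausted).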
func newStackServer(stack []stubResponse) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if len(stack) < 1 {
rw.Header().Add("Error", fmt.Sprintf("Request stack is empty - Method: %s Path: %s", req.Method, req.URL.Path))
http.Error(rw, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
match := stack[0]
stack = stack[1:]
if match.Method != "" && req.Method != match.Method {
rw.Header().Add("Error", fmt.Sprintf("Request %s != %s", match.Method, req.Method))
http.Error(rw, fmt.Sprintf("Request %s != %s", match.Method, req.Method), http.StatusInternalServerError)
return
}
if match.Path != "" && match.Path != req.URL.Path {
rw.Header().Add("Error", fmt.Sprintf("Request %s != %s", match.Path, req.URL.Path))
http.Error(rw, fmt.Sprintf("Request %s != %s", match.Path, req.URL.Path), http.StatusInternalServerError)
return
}
rw.Header().Add("Complete", fmt.Sprintf("Method: %s Path: %s", match.Method, match.Path))
rw.WriteHeader(match.StatusCode)
if match.Response != "" {
_, err := rw.Write([]byte(match.Response))
if err != nil {
panic("failed to write response: " + err.Error())
}
}
}))
}
func newSecureServer(token string, handler http.HandlerFunc) *httptest.Server {
token = fmt.Sprintf("Bearer %s", token)
return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.Header.Get("authorization") != token {
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
if handler != nil {
handler(rw, req)
}
}))
}
func newSelfSignedSslServer(token string, handler http.HandlerFunc) *httptest.Server {
token = fmt.Sprintf("Bearer %s", token)
return httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.Header.Get("authorization") != token {
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
if handler != nil {
handler(rw, req)
}
}))
}
func newNoAuthServer(handler http.HandlerFunc) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.Header.Get("authorization") != "" {
http.Error(rw, "Authorization header was provider", http.StatusBadRequest)
return
}
if handler != nil {
handler(rw, req)
}
}))
}
func TestPostProcessor_Insecure_Ssl(t *testing.T) {
var p PostProcessor
server := newSelfSignedSslServer("foo", nil)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
config["insecure_skip_tls_verify"] = true
if err := p.Configure(config); err != nil {
t.Fatalf("Expected TLS to skip certificate validation: %s", err)
}
}
func TestPostProcessor_Configure_fromVagrantEnv(t *testing.T) {
var p PostProcessor
config := testGoodConfig()
server := newSecureServer("bar", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
config["access_token"] = ""
os.Setenv("VAGRANT_CLOUD_TOKEN", "bar")
defer func() {
os.Setenv("VAGRANT_CLOUD_TOKEN", "")
}()
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
if p.config.AccessToken != "bar" {
t.Fatalf("Expected to get token from VAGRANT_CLOUD_TOKEN env var. Got '%s' instead",
p.config.AccessToken)
}
}
func TestPostProcessor_Configure_fromAtlasEnv(t *testing.T) {
var p PostProcessor
config := testGoodConfig()
config["access_token"] = ""
server := newSecureServer("foo", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
os.Setenv("ATLAS_TOKEN", "foo")
defer func() {
os.Setenv("ATLAS_TOKEN", "")
}()
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
if p.config.AccessToken != "foo" {
t.Fatalf("Expected to get token from ATLAS_TOKEN env var. Got '%s' instead",
p.config.AccessToken)
}
if !p.warnAtlasToken {
t.Fatal("Expected warn flag to be set when getting token from atlas env var.")
}
}
func TestPostProcessor_Configure_Good(t *testing.T) {
config := testGoodConfig()
server := newSecureServer("foo", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
var p PostProcessor
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessor_Configure_Bad(t *testing.T) {
config := testBadConfig()
server := newSecureServer("foo", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
var p PostProcessor
if err := p.Configure(config); err == nil {
t.Fatalf("should have err")
}
}
func TestPostProcessor_Configure_checkAccessTokenIsRequiredByDefault(t *testing.T) {
var p PostProcessor
server := newSecureServer("foo", nil)
defer server.Close()
config := testNoAccessTokenProvidedConfig()
config["vagrant_cloud_url"] = server.URL
if err := p.Configure(config); err == nil {
t.Fatalf("Expected access token to be required.")
}
}
func TestPostProcessor_Configure_checkAccessTokenIsNotRequiredForOverriddenVagrantCloud(t *testing.T) {
var p PostProcessor
server := newNoAuthServer(nil)
defer server.Close()
config := testNoAccessTokenProvidedConfig()
config["vagrant_cloud_url"] = server.URL
if err := p.Configure(config); err != nil {
t.Fatalf("Expected blank access token to be allowed and authenticate to pass: %s", err)
}
}
func TestPostProcessor_PostProcess_checkArtifactType(t *testing.T) {
artifact := &packersdk.MockArtifact{
BuilderIdValue: "invalid.builder",
}
config := testGoodConfig()
server := newSecureServer("foo", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
var p PostProcessor
p.Configure(config)
_, _, _, err := p.PostProcess(context.Background(), testUi(), artifact)
if !strings.Contains(err.Error(), "Unknown artifact type") {
t.Fatalf("Should error with message 'Unknown artifact type...' with BuilderId: %s", artifact.BuilderIdValue)
}
}
func TestPostProcessor_PostProcess_checkArtifactFileIsBox(t *testing.T) {
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant", // good
FilesValue: []string{"invalid.boxfile"}, // should have .box extension
}
config := testGoodConfig()
server := newSecureServer("foo", nil)
defer server.Close()
config["vagrant_cloud_url"] = server.URL
var p PostProcessor
p.Configure(config)
_, _, _, err := p.PostProcess(context.Background(), testUi(), artifact)
if !strings.Contains(err.Error(), "Unknown files in artifact") {
t.Fatalf("Should error with message 'Unknown files in artifact...' with artifact file: %s",
artifact.FilesValue[0])
}
}
func TestPostProcessor_PostProcess_uploadsAndReleases(t *testing.T) {
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider": "virtualbox"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(boxfile.Name())
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant",
FilesValue: []string{boxfile.Name()},
}
s := newStackServer([]stubResponse{stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-path"}})
defer s.Close()
stack := []stubResponse{
stubResponse{StatusCode: 200, Method: "GET", Path: "/authenticate"},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64", Response: `{"tag": "hashicorp/precise64"}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/versions", Response: `{}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/version/0.5/providers", Response: `{}`},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64/version/0.5/provider/id/upload", Response: `{"upload_path": "` + s.URL + `/box-upload-path"}`},
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box/hashicorp/precise64/version/0.5/release"},
}
server := newStackServer(stack)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
config["no_direct_upload"] = true
var p PostProcessor
err = p.Configure(config)
if err != nil {
t.Fatalf("err: %s", err)
}
_, _, _, err = p.PostProcess(context.Background(), testUi(), artifact)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessor_PostProcess_uploadsAndNoRelease(t *testing.T) {
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider": "virtualbox"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(boxfile.Name())
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant",
FilesValue: []string{boxfile.Name()},
}
s := newStackServer([]stubResponse{stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-path"}})
defer s.Close()
stack := []stubResponse{
stubResponse{StatusCode: 200, Method: "GET", Path: "/authenticate"},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64", Response: `{"tag": "hashicorp/precise64"}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/versions", Response: `{}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/version/0.5/providers", Response: `{}`},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64/version/0.5/provider/id/upload", Response: `{"upload_path": "` + s.URL + `/box-upload-path"}`},
}
server := newStackServer(stack)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
config["no_direct_upload"] = true
config["no_release"] = true
var p PostProcessor
err = p.Configure(config)
if err != nil {
t.Fatalf("err: %s", err)
}
_, _, _, err = p.PostProcess(context.Background(), testUi(), artifact)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessor_PostProcess_directUpload5GFile(t *testing.T) {
// Disable test on Windows due to unreliable sparse file creation
if runtime.GOOS == "windows" {
return
}
// Boxes up to 5GB are supported for direct upload so
// set the box asset to be 5GB exactly
fSize := int64(5368709120)
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider": "virtualbox"}`},
}
f, err := createBox(files)
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(f.Name())
if err := expandFile(f, fSize); err != nil {
t.Fatalf("failed to expand box file - %s", err)
}
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant",
FilesValue: []string{f.Name()},
}
f.Close()
s := newStackServer(
[]stubResponse{
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-path"},
},
)
defer s.Close()
stack := []stubResponse{
stubResponse{StatusCode: 200, Method: "GET", Path: "/authenticate"},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64", Response: `{"tag": "hashicorp/precise64"}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/versions", Response: `{}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/version/0.5/providers", Response: `{}`},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64/version/0.5/provider/id/upload/direct"},
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-complete"},
}
server := newStackServer(stack)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
config["no_release"] = true
// Set response here so we have API server URL available
stack[4].Response = `{"upload_path": "` + s.URL + `/box-upload-path", "callback": "` + server.URL + `/box-upload-complete"}`
var p PostProcessor
err = p.Configure(config)
if err != nil {
t.Fatalf("err: %s", err)
}
_, _, _, err = p.PostProcess(context.Background(), testUi(), artifact)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessor_PostProcess_directUploadOver5GFile(t *testing.T) {
// Disable test on Windows due to unreliable sparse file creation
if runtime.GOOS == "windows" {
return
}
// Boxes over 5GB are not supported for direct upload so
// set the box asset to be one byte over 5GB
fSize := int64(5368709121)
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider": "virtualbox"}`},
}
f, err := createBox(files)
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(f.Name())
if err := expandFile(f, fSize); err != nil {
t.Fatalf("failed to expand box file - %s", err)
}
f.Close()
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant",
FilesValue: []string{f.Name()},
}
s := newStackServer(
[]stubResponse{
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-path"},
},
)
defer s.Close()
stack := []stubResponse{
stubResponse{StatusCode: 200, Method: "GET", Path: "/authenticate"},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64", Response: `{"tag": "hashicorp/precise64"}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/versions", Response: `{}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/version/0.5/providers", Response: `{}`},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64/version/0.5/provider/id/upload", Response: `{"upload_path": "` + s.URL + `/box-upload-path"}`},
}
server := newStackServer(stack)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
config["no_release"] = true
var p PostProcessor
err = p.Configure(config)
if err != nil {
t.Fatalf("err: %s", err)
}
_, _, _, err = p.PostProcess(context.Background(), testUi(), artifact)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessor_PostProcess_uploadsDirectAndReleases(t *testing.T) {
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider": "virtualbox"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(boxfile.Name())
artifact := &packersdk.MockArtifact{
BuilderIdValue: "mitchellh.post-processor.vagrant",
FilesValue: []string{boxfile.Name()},
}
s := newStackServer(
[]stubResponse{
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-path"},
},
)
defer s.Close()
stack := []stubResponse{
stubResponse{StatusCode: 200, Method: "GET", Path: "/authenticate"},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64", Response: `{"tag": "hashicorp/precise64"}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/versions", Response: `{}`},
stubResponse{StatusCode: 200, Method: "POST", Path: "/box/hashicorp/precise64/version/0.5/providers", Response: `{}`},
stubResponse{StatusCode: 200, Method: "GET", Path: "/box/hashicorp/precise64/version/0.5/provider/id/upload/direct"},
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box-upload-complete"},
stubResponse{StatusCode: 200, Method: "PUT", Path: "/box/hashicorp/precise64/version/0.5/release"},
}
server := newStackServer(stack)
defer server.Close()
config := testGoodConfig()
config["vagrant_cloud_url"] = server.URL
// Set response here so we have API server URL available
stack[4].Response = `{"upload_path": "` + s.URL + `/box-upload-path", "callback": "` + server.URL + `/box-upload-complete"}`
var p PostProcessor
err = p.Configure(config)
if err != nil {
t.Fatalf("err: %s", err)
}
_, _, _, err = p.PostProcess(context.Background(), testUi(), artifact)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func testUi() *packersdk.BasicUi {
return &packersdk.BasicUi{
Reader: new(bytes.Buffer),
Writer: new(bytes.Buffer),
}
}
func TestPostProcessor_ImplementsPostProcessor(t *testing.T) {
var _ packersdk.PostProcessor = new(PostProcessor)
}
func TestProviderFromBuilderName(t *testing.T) {
if providerFromBuilderName("foobar") != "foobar" {
t.Fatal("should copy unknown provider")
}
if providerFromBuilderName("vmware") != "vmware_desktop" {
t.Fatal("should convert provider")
}
}
func TestProviderFromVagrantBox_missing_box(t *testing.T) {
// Bad: Box does not exist
boxfile := "i_dont_exist.box"
_, err := providerFromVagrantBox(boxfile)
if err == nil {
t.Fatal("Should have error as box file does not exist")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_empty_box(t *testing.T) {
// Bad: Empty box file
boxfile, err := newBoxFile()
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatal("Should have error as box file is empty")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_gzip_only_box(t *testing.T) {
boxfile, err := newBoxFile()
if err != nil {
t.Fatalf("%s", err)
}
defer os.Remove(boxfile.Name())
// Bad: Box is just a plain gzip file
aw := gzip.NewWriter(boxfile)
_, err = aw.Write([]byte("foo content"))
if err != nil {
t.Fatal("Error zipping test box file")
}
aw.Close() // Flush the gzipped contents to file
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as box file is a plain gzip file: %s", err)
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_no_files_in_archive(t *testing.T) {
// Bad: Box contains no files
boxfile, err := createBox(tarFiles{})
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as box file has no contents")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_no_metadata(t *testing.T) {
// Bad: Box contains no metadata/metadata.json file
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as box file does not include metadata.json file")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_metadata_empty(t *testing.T) {
// Bad: Create a box with an empty metadata.json file
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", ""},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as box files metadata.json file is empty")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_metadata_bad_json(t *testing.T) {
// Bad: Create a box with bad JSON in the metadata.json file
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", "{provider: badjson}"},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as box files metadata.json file contains badly formatted JSON")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_metadata_no_provider_key(t *testing.T) {
// Bad: Create a box with no 'provider' key in the metadata.json file
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"cows":"moo"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as provider key/value pair is missing from boxes metadata.json file")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_metadata_provider_value_empty(t *testing.T) {
// Bad: The box's metadata.json file 'provider' key has an empty value
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider":""}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
_, err = providerFromVagrantBox(boxfile.Name())
if err == nil {
t.Fatalf("Should have error as value associated with 'provider' key in boxes metadata.json file is empty")
}
t.Logf("%s", err)
}
func TestProviderFromVagrantBox_metadata_ok(t *testing.T) {
// Good: The box's metadata.json file has the 'provider' key/value pair
expectedProvider := "virtualbox"
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider":"` + expectedProvider + `"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
provider, err := providerFromVagrantBox(boxfile.Name())
if err != nil {
t.Fatalf("error getting provider from vagrant box %s:%v", boxfile.Name(), err)
}
assert.Equal(t, expectedProvider, provider, "Error: Expected provider: '%s'. Got '%s'", expectedProvider, provider)
t.Logf("Expected provider '%s'. Got provider '%s'", expectedProvider, provider)
}
func TestGetProvider_artifice(t *testing.T) {
expectedProvider := "virtualbox"
files := tarFiles{
{"foo.txt", "This is a foo file"},
{"bar.txt", "This is a bar file"},
{"metadata.json", `{"provider":"` + expectedProvider + `"}`},
}
boxfile, err := createBox(files)
if err != nil {
t.Fatalf("Error creating test box: %s", err)
}
defer os.Remove(boxfile.Name())
provider, err := getProvider("", boxfile.Name(), "artifice")
if err != nil {
t.Fatalf("error getting provider %s:%v", boxfile.Name(), err)
}
assert.Equal(t, expectedProvider, provider, "Error: Expected provider: '%s'. Got '%s'", expectedProvider, provider)
t.Logf("Expected provider '%s'. Got provider '%s'", expectedProvider, provider)
}
func TestGetProvider_other(t *testing.T) {
expectedProvider := "virtualbox"
provider, _ := getProvider(expectedProvider, "foo.box", "other")
assert.Equal(t, expectedProvider, provider, "Error: Expected provider: '%s'. Got '%s'", expectedProvider, provider)
t.Logf("Expected provider '%s'. Got provider '%s'", expectedProvider, provider)
}
func newBoxFile() (boxfile *os.File, err error) {
boxfile, err = ioutil.TempFile(os.TempDir(), "test*.box")
if err != nil {
return boxfile, fmt.Errorf("Error creating test box file: %s", err)
}
return boxfile, nil
}
func createBox(files tarFiles) (boxfile *os.File, err error) {
boxfile, err = newBoxFile()
if err != nil {
return boxfile, err
}
// Box files are gzipped tar archives
aw := gzip.NewWriter(boxfile)
tw := tar.NewWriter(aw)
// Add each file to the box
for _, file := range files {
// Create and write the tar file header
hdr := &tar.Header{
Name: file.Name,
Mode: 0644,
Size: int64(len(file.Body)),
}
err = tw.WriteHeader(hdr)
if err != nil {
return boxfile, fmt.Errorf("Error writing box tar file header: %s", err)
}
// Write the file contents
_, err = tw.Write([]byte(file.Body))
if err != nil {
return boxfile, fmt.Errorf("Error writing box tar file contents: %s", err)
}
}
// Flush and close each writer
err = tw.Close()
if err != nil {
return boxfile, fmt.Errorf("Error flushing tar file contents: %s", err)
}
err = aw.Close()
if err != nil {
return boxfile, fmt.Errorf("Error flushing gzip file contents: %s", err)
}
return boxfile, nil
}
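// expandFile grows f to finalSize by seeking to one byte short of the target
// length (relative to the current end) and writing a single zero byte; on
// most filesystems this creates a sparse file, so the multi-gigabyte upload
// tests above do not need real disk space.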
func expandFile(f *os.File, finalSize int64) (err error) {
s, err := f.Stat()
if err != nil {
return
}
size := finalSize - s.Size()
if size < 1 {
return
}
if _, err = f.Seek(size-1, 2); err != nil {
return
}
if _, err = f.Write([]byte{0}); err != nil {
return
}
return nil
}

View File

@@ -1,13 +0,0 @@
package version
import (
"github.com/hashicorp/packer-plugin-sdk/version"
packerVersion "github.com/hashicorp/packer/version"
)
var VagrantCloudPluginVersion *version.PluginVersion
func init() {
VagrantCloudPluginVersion = version.InitializePluginVersion(
packerVersion.Version, packerVersion.VersionPrerelease)
}

View File

@@ -1,22 +0,0 @@
package vagrant
import (
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestArtifact_ImplementsArtifact(t *testing.T) {
var raw interface{}
raw = &Artifact{}
if _, ok := raw.(packersdk.Artifact); !ok {
t.Fatalf("Artifact should be a Artifact")
}
}
func TestArtifact_Id(t *testing.T) {
artifact := NewArtifact("vmware", "./")
if artifact.Id() != "vmware" {
t.Fatalf("should return name as Id")
}
}

View File

@@ -1,37 +0,0 @@
package vagrant
import (
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestAWSProvider_impl(t *testing.T) {
var _ Provider = new(AWSProvider)
}
func TestAWSProvider_KeepInputArtifact(t *testing.T) {
p := new(AWSProvider)
if !p.KeepInputArtifact() {
t.Fatal("should keep input artifact")
}
}
func TestAWSProvider_ArtifactId(t *testing.T) {
p := new(AWSProvider)
ui := testUi()
artifact := &packersdk.MockArtifact{
IdValue: "us-east-1:ami-1234",
}
vagrantfile, _, err := p.Process(ui, artifact, "foo")
if err != nil {
t.Fatalf("should not have error: %s", err)
}
result := `aws.region_config "us-east-1", ami: "ami-1234"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
}

View File

@@ -1,94 +0,0 @@
package vagrant
import (
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestAzureProvider_impl(t *testing.T) {
var _ Provider = new(AzureProvider)
}
func TestAzureProvider_KeepInputArtifact(t *testing.T) {
p := new(AzureProvider)
if !p.KeepInputArtifact() {
t.Fatal("should keep input artifact")
}
}
func TestAzureProvider_ManagedImage(t *testing.T) {
p := new(AzureProvider)
ui := testUi()
artifact := &packersdk.MockArtifact{
StringValue: `Azure.ResourceManagement.VMImage:
OSType: Linux
ManagedImageResourceGroupName: packerruns
ManagedImageName: packer-1533651633
ManagedImageId: /subscriptions/e6229913-d9c3-4ddd-99a4-9e1ef3beaa1b/resourceGroups/packerruns/providers/Microsoft.Compute/images/packer-1533675589
ManagedImageLocation: westus`,
}
vagrantfile, _, err := p.Process(ui, artifact, "foo")
if err != nil {
t.Fatalf("should not have error: %s", err)
}
result := `azure.location = "westus"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
result = `azure.vm_managed_image_id = "/subscriptions/e6229913-d9c3-4ddd-99a4-9e1ef3beaa1b/resourceGroups/packerruns/providers/Microsoft.Compute/images/packer-1533675589"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
// DO NOT set resource group in Vagrantfile, it should be separate from the image
result = `azure.resource_group_name`
if strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
result = `azure.vm_operating_system`
if strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
}
func TestAzureProvider_VHD(t *testing.T) {
p := new(AzureProvider)
ui := testUi()
artifact := &packersdk.MockArtifact{
IdValue: "https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-osDisk.96ed2120-591d-4900-95b0-ee8e985f2213.vhd",
StringValue: `Azure.ResourceManagement.VMImage:
OSType: Linux
StorageAccountLocation: westus
OSDiskUri: https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-osDisk.96ed2120-591d-4900-95b0-ee8e985f2213.vhd
OSDiskUriReadOnlySas: https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-osDisk.96ed2120-591d-4900-95b0-ee8e985f2213.vhd?se=2018-09-07T18%3A36%3A34Z&sig=xUiFvwAviPYoP%2Bc91vErqvwYR1eK4x%2BAx7YLMe84zzU%3D&sp=r&sr=b&sv=2016-05-31
TemplateUri: https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-vmTemplate.96ed2120-591d-4900-95b0-ee8e985f2213.json
TemplateUriReadOnlySas: https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-vmTemplate.96ed2120-591d-4900-95b0-ee8e985f2213.json?se=2018-09-07T18%3A36%3A34Z&sig=lDxePyAUCZbfkB5ddiofimXfwk5INn%2F9E2BsnqIKC9Q%3D&sp=r&sr=b&sv=2016-05-31`,
}
vagrantfile, _, err := p.Process(ui, artifact, "foo")
if err != nil {
t.Fatalf("should not have error: %s", err)
}
result := `azure.location = "westus"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
result = `azure.vm_vhd_uri = "https://packerbuildswest.blob.core.windows.net/system/Microsoft.Compute/Images/images/packer-osDisk.96ed2120-591d-4900-95b0-ee8e985f2213.vhd"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
result = `azure.vm_operating_system = "Linux"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
// DO NOT set resource group in Vagrantfile, it should be separate from the image
result = `azure.resource_group_name`
if strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
}

View File

@@ -1,41 +0,0 @@
package vagrant
import (
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestDigitalOceanProvider_impl(t *testing.T) {
var _ Provider = new(DigitalOceanProvider)
}
func TestDigitalOceanProvider_KeepInputArtifact(t *testing.T) {
p := new(DigitalOceanProvider)
if !p.KeepInputArtifact() {
t.Fatal("should keep input artifact")
}
}
func TestDigitalOceanProvider_ArtifactId(t *testing.T) {
p := new(DigitalOceanProvider)
ui := testUi()
artifact := &packersdk.MockArtifact{
IdValue: "San Francisco:42",
}
vagrantfile, _, err := p.Process(ui, artifact, "foo")
if err != nil {
t.Fatalf("should not have error: %s", err)
}
image := `digital_ocean.image = "42"`
if !strings.Contains(vagrantfile, image) {
t.Fatalf("wrong image substitution: %s", vagrantfile)
}
region := `digital_ocean.region = "San Francisco"`
if !strings.Contains(vagrantfile, region) {
t.Fatalf("wrong region substitution: %s", vagrantfile)
}
}
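The mock ID above packs both values into one string; a small sketch, assuming a "<region>:<image id>" layout, of splitting it into the two settings the test checks:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Split on the first colon: region first, image ID second.
    id := "San Francisco:42"
    parts := strings.SplitN(id, ":", 2)
    region, image := parts[0], parts[1]
    fmt.Printf("digital_ocean.image = %q\n", image)
    fmt.Printf("digital_ocean.region = %q\n", region)
}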

View File

@ -1,9 +0,0 @@
package vagrant
import (
"testing"
)
func TestDockerProvider_impl(t *testing.T) {
var _ Provider = new(DockerProvider)
}

View File

@ -1,37 +0,0 @@
package vagrant
import (
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func TestGoogleProvider_impl(t *testing.T) {
var _ Provider = new(GoogleProvider)
}
func TestGoogleProvider_KeepInputArtifact(t *testing.T) {
p := new(GoogleProvider)
if !p.KeepInputArtifact() {
t.Fatal("should keep input artifact")
}
}
func TestGoogleProvider_ArtifactId(t *testing.T) {
p := new(GoogleProvider)
ui := testUi()
artifact := &packersdk.MockArtifact{
IdValue: "packer-1234",
}
vagrantfile, _, err := p.Process(ui, artifact, "foo")
if err != nil {
t.Fatalf("should not have error: %s", err)
}
result := `google.image = "packer-1234"`
if !strings.Contains(vagrantfile, result) {
t.Fatalf("wrong substitution: %s", vagrantfile)
}
}

View File

@ -1,69 +0,0 @@
package vagrant
import (
"fmt"
"testing"
)
func assertSizeInMegabytes(t *testing.T, size string, expected uint64) {
actual := sizeInMegabytes(size)
if actual != expected {
t.Fatalf("the size `%s` was converted to `%d` but expected `%d`", size, actual, expected)
}
}
func Test_sizeInMegabytes_WithInvalidUnitMustPanic(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Fatalf("expected a panic but got none")
}
}()
sizeInMegabytes("1234x")
}
func Test_sizeInMegabytes_WithoutUnitMustDefaultToMegabytes(t *testing.T) {
assertSizeInMegabytes(t, "1234", 1234)
}
func Test_sizeInMegabytes_WithBytesUnit(t *testing.T) {
assertSizeInMegabytes(t, fmt.Sprintf("%db", 1234*1024*1024), 1234)
assertSizeInMegabytes(t, fmt.Sprintf("%dB", 1234*1024*1024), 1234)
assertSizeInMegabytes(t, "1B", 0)
}
func Test_sizeInMegabytes_WithKiloBytesUnit(t *testing.T) {
assertSizeInMegabytes(t, fmt.Sprintf("%dk", 1234*1024), 1234)
assertSizeInMegabytes(t, fmt.Sprintf("%dK", 1234*1024), 1234)
assertSizeInMegabytes(t, "1K", 0)
}
func Test_sizeInMegabytes_WithMegabytesUnit(t *testing.T) {
assertSizeInMegabytes(t, "1234m", 1234)
assertSizeInMegabytes(t, "1234M", 1234)
assertSizeInMegabytes(t, "1M", 1)
}
func Test_sizeInMegabytes_WithGigabytesUnit(t *testing.T) {
assertSizeInMegabytes(t, "1234g", 1234*1024)
assertSizeInMegabytes(t, "1234G", 1234*1024)
assertSizeInMegabytes(t, "1G", 1*1024)
}
func Test_sizeInMegabytes_WithTerabytesUnit(t *testing.T) {
assertSizeInMegabytes(t, "1234t", 1234*1024*1024)
assertSizeInMegabytes(t, "1234T", 1234*1024*1024)
assertSizeInMegabytes(t, "1T", 1*1024*1024)
}
func Test_sizeInMegabytes_WithPetabytesUnit(t *testing.T) {
assertSizeInMegabytes(t, "1234p", 1234*1024*1024*1024)
assertSizeInMegabytes(t, "1234P", 1234*1024*1024*1024)
assertSizeInMegabytes(t, "1P", 1*1024*1024*1024)
}
func Test_sizeInMegabytes_WithExabytesUnit(t *testing.T) {
assertSizeInMegabytes(t, "1234e", 1234*1024*1024*1024*1024)
assertSizeInMegabytes(t, "1234E", 1234*1024*1024*1024*1024)
assertSizeInMegabytes(t, "1E", 1*1024*1024*1024*1024)
}
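Taken together, these cases pin down the unit handling: a bare number is megabytes, the suffixes b/k/m/g/t/p/e (case-insensitive) rescale the value, and anything else panics. A minimal converter consistent with those cases, offered as a sketch rather than the builder's actual sizeInMegabytes:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// sizeInMegabytesSketch mirrors the behaviour the tests above describe.
func sizeInMegabytesSketch(size string) uint64 {
    suffix := strings.ToLower(size[len(size)-1:])
    digits := size
    switch suffix {
    case "b", "k", "m", "g", "t", "p", "e":
        digits = size[:len(size)-1]
    default:
        if suffix < "0" || suffix > "9" {
            panic("invalid size unit: " + suffix)
        }
        suffix = "m" // bare numbers default to megabytes
    }
    value, err := strconv.ParseUint(digits, 10, 64)
    if err != nil {
        panic(err)
    }
    switch suffix {
    case "b":
        return value / (1024 * 1024)
    case "k":
        return value / 1024
    case "g":
        return value * 1024
    case "t":
        return value * 1024 * 1024
    case "p":
        return value * 1024 * 1024 * 1024
    case "e":
        return value * 1024 * 1024 * 1024 * 1024
    default: // "m"
        return value
    }
}

func main() {
    fmt.Println(sizeInMegabytesSketch("1234")) // 1234
    fmt.Println(sizeInMegabytesSketch("1G"))   // 1024
    fmt.Println(sizeInMegabytesSketch("1B"))   // 0
}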

View File

@ -1,9 +0,0 @@
package vagrant
import (
"testing"
)
func TestLXCProvider_impl(t *testing.T) {
var _ Provider = new(LXCProvider)
}

View File

@ -1,9 +0,0 @@
package vagrant
import (
"testing"
)
func TestParallelsProvider_impl(t *testing.T) {
var _ Provider = new(ParallelsProvider)
}

View File

@ -1,239 +0,0 @@
package vagrant
import (
"bytes"
"compress/flate"
"context"
"io/ioutil"
"os"
"strings"
"testing"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
func testConfig() map[string]interface{} {
return map[string]interface{}{}
}
func testPP(t *testing.T) *PostProcessor {
var p PostProcessor
if err := p.Configure(testConfig()); err != nil {
t.Fatalf("err: %s", err)
}
return &p
}
func testUi() *packersdk.BasicUi {
return &packersdk.BasicUi{
Reader: new(bytes.Buffer),
Writer: new(bytes.Buffer),
}
}
func TestPostProcessor_ImplementsPostProcessor(t *testing.T) {
var _ packersdk.PostProcessor = new(PostProcessor)
}
func TestPostProcessorPrepare_compressionLevel(t *testing.T) {
var p PostProcessor
// Default
c := testConfig()
delete(c, "compression_level")
if err := p.Configure(c); err != nil {
t.Fatalf("err: %s", err)
}
config := p.config
if config.CompressionLevel != flate.DefaultCompression {
t.Fatalf("bad: %#v", config.CompressionLevel)
}
// Set
c = testConfig()
c["compression_level"] = 7
if err := p.Configure(c); err != nil {
t.Fatalf("err: %s", err)
}
config = p.config
if config.CompressionLevel != 7 {
t.Fatalf("bad: %#v", config.CompressionLevel)
}
}
func TestPostProcessorPrepare_outputPath(t *testing.T) {
var p PostProcessor
// Default
c := testConfig()
delete(c, "output")
err := p.Configure(c)
if err != nil {
t.Fatalf("err: %s", err)
}
// Bad template
c["output"] = "bad {{{{.Template}}}}"
err = p.Configure(c)
if err == nil {
t.Fatal("should have error")
}
}
func TestSpecificConfig(t *testing.T) {
var p PostProcessor
// Default
c := testConfig()
c["compression_level"] = 1
c["output"] = "folder"
c["override"] = map[string]interface{}{
"aws": map[string]interface{}{
"compression_level": 7,
},
}
if err := p.Configure(c); err != nil {
t.Fatalf("err: %s", err)
}
// overrides config
config, err := p.specificConfig("aws")
if err != nil {
t.Fatalf("err: %s", err)
}
if config.CompressionLevel != 7 {
t.Fatalf("bad: %#v", config.CompressionLevel)
}
if config.OutputPath != "folder" {
t.Fatalf("bad: %#v", config.OutputPath)
}
// does NOT overrides config
config, err = p.specificConfig("virtualbox")
if err != nil {
t.Fatalf("err: %s", err)
}
if config.CompressionLevel != 1 {
t.Fatalf("bad: %#v", config.CompressionLevel)
}
if config.OutputPath != "folder" {
t.Fatalf("bad: %#v", config.OutputPath)
}
}
func TestPostProcessorPrepare_vagrantfileTemplateExists(t *testing.T) {
f, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
name := f.Name()
c := testConfig()
c["vagrantfile_template"] = name
if err := f.Close(); err != nil {
t.Fatalf("err: %s", err)
}
var p PostProcessor
if err := p.Configure(c); err != nil {
t.Fatal("no error expected as vagrantfile_template exists")
}
if err := os.Remove(name); err != nil {
t.Fatalf("err: %s", err)
}
if err := p.Configure(c); err == nil {
t.Fatal("expected error since vagrantfile_template does not exist and vagrantfile_template_generated is unset")
}
// The vagrantfile_template will be generated during the build process
c["vagrantfile_template_generated"] = true
if err := p.Configure(c); err != nil {
t.Fatal("no error expected due to missing vagrantfile_template as vagrantfile_template_generated is set")
}
}
func TestPostProcessorPrepare_ProviderOverrideExists(t *testing.T) {
c := testConfig()
c["provider_override"] = "foo"
var p PostProcessor
if err := p.Configure(c); err == nil {
t.Fatal("Should have errored since foo is not a valid vagrant provider")
}
c = testConfig()
c["provider_override"] = "aws"
if err := p.Configure(c); err != nil {
t.Fatal("Should not have errored since aws is a valid vagrant provider")
}
}
func TestPostProcessorPostProcess_badId(t *testing.T) {
artifact := &packersdk.MockArtifact{
BuilderIdValue: "invalid.packer",
}
_, _, _, err := testPP(t).PostProcess(context.Background(), testUi(), artifact)
if !strings.Contains(err.Error(), "artifact type") {
t.Fatalf("err: %s", err)
}
}
func TestPostProcessorPostProcess_vagrantfileUserVariable(t *testing.T) {
var p PostProcessor
f, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(f.Name())
c := map[string]interface{}{
"packer_user_variables": map[string]string{
"foo": f.Name(),
},
"vagrantfile_template": "{{user `foo`}}",
}
err = p.Configure(c)
if err != nil {
t.Fatalf("err: %s", err)
}
a := &packersdk.MockArtifact{
BuilderIdValue: "packer.parallels",
}
a2, _, _, err := p.PostProcess(context.Background(), testUi(), a)
if a2 != nil {
for _, fn := range a2.Files() {
defer os.Remove(fn)
}
}
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProviderForName(t *testing.T) {
if v, ok := providerForName("virtualbox").(*VBoxProvider); !ok {
t.Fatalf("bad: %#v", v)
}
if providerForName("nope") != nil {
t.Fatal("should be nil if bad provider")
}
}
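The lookup this test exercises is just a name-to-implementation mapping that returns nil for unknown names. A self-contained sketch of that shape; the real package registers many more providers:

package main

import "fmt"

// provider and vboxProvider are stand-ins for the package's Provider types.
type provider interface{ Name() string }

type vboxProvider struct{}

func (vboxProvider) Name() string { return "virtualbox" }

func providerForName(name string) provider {
    switch name {
    case "virtualbox":
        return vboxProvider{}
    default:
        return nil // unknown names yield nil, as the test expects
    }
}

func main() {
    fmt.Println(providerForName("virtualbox").Name()) // virtualbox
    fmt.Println(providerForName("nope") == nil)       // true
}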

View File

@ -1,13 +0,0 @@
package version
import (
"github.com/hashicorp/packer-plugin-sdk/version"
packerVersion "github.com/hashicorp/packer/version"
)
var VagrantPostprocessorVersion *version.PluginVersion
func init() {
VagrantPostprocessorVersion = version.InitializePluginVersion(
packerVersion.Version, packerVersion.VersionPrerelease)
}

View File

@ -1,28 +0,0 @@
package vagrant
import (
"os"
"path/filepath"
"testing"
"github.com/hashicorp/packer-plugin-sdk/tmp"
"github.com/stretchr/testify/assert"
)
func TestVBoxProvider_impl(t *testing.T) {
var _ Provider = new(VBoxProvider)
}
func TestDecomressOVA(t *testing.T) {
td, err := tmp.Dir("pp-vagrant-virtualbox")
assert.NoError(t, err)
defer os.RemoveAll(td)
fixture := "./test-fixtures/decompress-tar/outside_parent.tar"
err = DecompressOva(td, fixture)
assert.NoError(t, err)
_, err = os.Stat(filepath.Join(filepath.Base(td), "demo.poc"))
assert.Error(t, err)
_, err = os.Stat(filepath.Join(td, "demo.poc"))
assert.NoError(t, err)
}
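The fixture name and the two Stat checks imply that an entry trying to escape the output directory must still land inside it. A sketch of traversal-safe extraction in that spirit, assuming entries are flattened to their base name; untarFlat is illustrative, not the plugin's DecompressOva:

package main

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"
    "os"
    "path/filepath"
)

// untarFlat writes every regular file under dir using only its base name,
// so an entry named "../outside/demo.poc" cannot escape dir.
func untarFlat(dir string, r io.Reader) error {
    tr := tar.NewReader(r)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
        if hdr.Typeflag != tar.TypeReg {
            continue // a fuller implementation would handle dirs, links, etc.
        }
        dst := filepath.Join(dir, filepath.Base(hdr.Name))
        f, err := os.Create(dst)
        if err != nil {
            return err
        }
        if _, err := io.Copy(f, tr); err != nil {
            f.Close()
            return err
        }
        if err := f.Close(); err != nil {
            return err
        }
    }
}

func main() {
    // Build a tiny in-memory archive whose single entry tries to escape.
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    body := []byte("proof of concept")
    tw.WriteHeader(&tar.Header{Name: "../outside/demo.poc", Mode: 0o600, Size: int64(len(body)), Typeflag: tar.TypeReg})
    tw.Write(body)
    tw.Close()

    dir, err := os.MkdirTemp("", "untar-flat")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)
    if err := untarFlat(dir, &buf); err != nil {
        panic(err)
    }
    fmt.Println(filepath.Join(dir, "demo.poc")) // the file lands inside dir
}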

View File

@ -1,9 +0,0 @@
package vagrant
import (
"testing"
)
func TestVMwareProvider_impl(t *testing.T) {
var _ Provider = new(VMwareProvider)
}

View File

@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -21,7 +21,6 @@ import (
 	"github.com/hashicorp/packer-plugin-sdk/template/config"
 	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
 	"github.com/hashicorp/packer-plugin-sdk/tmp"
-	"github.com/hashicorp/packer/post-processor/artifice"
 	"github.com/mitchellh/mapstructure"
 )
@ -217,7 +216,7 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifa
 	provider := providerForName(name)
 	if provider == nil {
-		if artifact.BuilderId() == artifice.BuilderId {
+		if artifact.BuilderId() == "packer.post-processor.artifice" {
 			return nil, false, false, fmt.Errorf(
 				"Unknown provider type: When using an artifact created by " +
 					"the artifice post-processor, you need to set the " +

View File

@ -645,15 +645,15 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).store
 	case level == ConstantCompression:
-		d.w.logNewTablePenalty = 4
-		d.window = make([]byte, maxStoreBlockSize)
+		d.w.logNewTablePenalty = 8
+		d.window = make([]byte, 32<<10)
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).storeHuff
 	case level == DefaultCompression:
 		level = 5
 		fallthrough
 	case level >= 1 && level <= 6:
-		d.w.logNewTablePenalty = 6
+		d.w.logNewTablePenalty = 8
 		d.fast = newFastEnc(level)
 		d.window = make([]byte, maxStoreBlockSize)
 		d.fill = (*compressor).fillBlock

View File

@ -6,6 +6,7 @@
 package flate

 import (
+	"encoding/binary"
 	"fmt"
 	"math/bits"
 )
@ -65,26 +66,15 @@ func load32(b []byte, i int) uint32 {
 }

 func load64(b []byte, i int) uint64 {
-	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-	b = b[i:]
-	b = b[:8]
-	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	return binary.LittleEndian.Uint64(b[i:])
 }

 func load3232(b []byte, i int32) uint32 {
-	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-	b = b[i:]
-	b = b[:4]
-	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	return binary.LittleEndian.Uint32(b[i:])
 }

 func load6432(b []byte, i int32) uint64 {
-	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-	b = b[i:]
-	b = b[:8]
-	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	return binary.LittleEndian.Uint64(b[i:])
 }

 func hash(u uint32) uint32 {
@ -225,9 +215,9 @@ func (e *fastGen) Reset() {
 func matchLen(a, b []byte) int {
 	b = b[:len(a)]
 	var checked int
-	if len(a) > 4 {
+	if len(a) >= 4 {
 		// Try 4 bytes first
-		if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+		if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
 			return bits.TrailingZeros32(diff) >> 3
 		}
 		// Switch to 8 byte matching.
@ -236,7 +226,7 @@ func matchLen(a, b []byte) int {
 		b = b[4:]
 		for len(a) >= 8 {
 			b = b[:len(a)]
-			if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+			if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
 				return checked + (bits.TrailingZeros64(diff) >> 3)
 			}
 			checked += 8
@ -247,7 +237,7 @@ func matchLen(a, b []byte) int {
 	b = b[:len(a)]
 	for i := range a {
 		if a[i] != b[i] {
-			return int(i) + checked
+			return i + checked
 		}
 	}
 	return len(a) + checked
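matchLen now compares 4 or 8 bytes at a time: XOR the two windows, and the position of the lowest set bit divided by 8 is the index of the first mismatching byte. A standalone illustration of that trick:

package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

// firstDiff returns how many leading bytes of two 8-byte windows match,
// using the XOR + TrailingZeros trick from matchLen above.
func firstDiff(a, b []byte) int {
    x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
    if x == 0 {
        return 8 // all eight bytes match
    }
    // Each differing byte contributes at least one set bit; the lowest set
    // bit's position divided by 8 is the index of the first mismatch.
    return bits.TrailingZeros64(x) >> 3
}

func main() {
    a := []byte("abcdefgh")
    b := []byte("abcdXfgh")
    fmt.Println(firstDiff(a, b)) // 4
}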

View File

@ -5,6 +5,7 @@
 package flate

 import (
+	"encoding/binary"
 	"io"
 )
@ -206,7 +207,7 @@ func (w *huffmanBitWriter) write(b []byte) {
 }

 func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
-	w.bits |= uint64(b) << (w.nbits & reg16SizeMask64)
+	w.bits |= uint64(b) << w.nbits
 	w.nbits += nb
 	if w.nbits >= 48 {
 		w.writeOutBits()
@ -420,13 +421,11 @@ func (w *huffmanBitWriter) writeOutBits() {
 	w.bits >>= 48
 	w.nbits -= 48
 	n := w.nbytes
-	w.bytes[n] = byte(bits)
-	w.bytes[n+1] = byte(bits >> 8)
-	w.bytes[n+2] = byte(bits >> 16)
-	w.bytes[n+3] = byte(bits >> 24)
-	w.bytes[n+4] = byte(bits >> 32)
-	w.bytes[n+5] = byte(bits >> 40)
+	// We over-write, but faster...
+	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
 	n += 6
 	if n >= bufferFlushSize {
 		if w.err != nil {
 			n = 0
@ -435,6 +434,7 @@ func (w *huffmanBitWriter) writeOutBits() {
 		w.write(w.bytes[:n])
 		n = 0
 	}
 	w.nbytes = n
 }
@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		} else {
 			// inlined
 			c := lengths[lengthCode&31]
-			w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
+			w.bits |= uint64(c.code) << w.nbits
 			w.nbits += c.len
 			if w.nbits >= 48 {
 				w.writeOutBits()
@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		} else {
 			// inlined
 			c := offs[offsetCode&31]
-			w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
+			w.bits |= uint64(c.code) << w.nbits
 			w.nbits += c.len
 			if w.nbits >= 48 {
 				w.writeOutBits()
@ -830,8 +830,8 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	// Assume header is around 70 bytes:
 	// https://stackoverflow.com/a/25454430
 	const guessHeaderSizeBits = 70 * 8
-	estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
-	estBits += w.lastHeader + 15
+	estBits := histogramSize(input, w.literalFreq[:], !eof && !sync)
+	estBits += w.lastHeader + len(input)/32
 	if w.lastHeader == 0 {
 		estBits += guessHeaderSizeBits
 	}
@ -845,9 +845,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 		return
 	}
+	reuseSize := 0
 	if w.lastHeader > 0 {
-		reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
-		estBits += estExtra
+		reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256])
 		if estBits < reuseSize {
 			// We owe an EOB
@ -859,6 +859,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	const numLiterals = endBlockMarker + 1
 	const numOffsets = 1
 	if w.lastHeader == 0 {
+		if !eof && !sync {
+			// Generate a slightly suboptimal tree that can be used for all.
+			fillHist(w.literalFreq[:numLiterals])
+		}
 		w.literalFreq[endBlockMarker] = 1
 		w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
@ -878,19 +882,14 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	for _, t := range input {
 		// Bitwriting inlined, ~30% speedup
 		c := encoding[t]
-		w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64)
+		w.bits |= uint64(c.code) << w.nbits
 		w.nbits += c.len
 		if w.nbits >= 48 {
 			bits := w.bits
 			w.bits >>= 48
 			w.nbits -= 48
 			n := w.nbytes
-			w.bytes[n] = byte(bits)
-			w.bytes[n+1] = byte(bits >> 8)
-			w.bytes[n+2] = byte(bits >> 16)
-			w.bytes[n+3] = byte(bits >> 24)
-			w.bytes[n+4] = byte(bits >> 32)
-			w.bytes[n+5] = byte(bits >> 40)
+			binary.LittleEndian.PutUint64(w.bytes[n:], bits)
 			n += 6
 			if n >= bufferFlushSize {
 				if w.err != nil {
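The bit-writer changes keep OR-ing codes into a 64-bit accumulator at the current bit offset and flush six bytes at a time with one little-endian store that intentionally over-writes two scratch bytes. A toy version of that accumulator, not the vendored huffmanBitWriter:

package main

import (
    "encoding/binary"
    "fmt"
)

// bitPacker is a toy accumulator: codes are OR'd into a 64-bit register at
// the current bit offset and flushed six bytes at a time with one
// little-endian store, which writes eight bytes and keeps only six.
type bitPacker struct {
    bits  uint64
    nbits uint16
    buf   [64 + 8]byte // +8 so PutUint64 always has room to over-write
    n     int
}

func (p *bitPacker) writeBits(code uint64, width uint16) {
    p.bits |= code << p.nbits
    p.nbits += width
    if p.nbits >= 48 {
        binary.LittleEndian.PutUint64(p.buf[p.n:], p.bits)
        p.bits >>= 48
        p.nbits -= 48
        p.n += 6
    }
}

func (p *bitPacker) flush() []byte {
    for p.nbits > 0 {
        p.buf[p.n] = byte(p.bits)
        p.bits >>= 8
        p.n++
        if p.nbits > 8 {
            p.nbits -= 8
        } else {
            p.nbits = 0
        }
    }
    return p.buf[:p.n]
}

func main() {
    var p bitPacker
    p.writeBits(0b101, 3)           // three-bit code
    p.writeBits(0b11, 2)            // two-bit code packed after it
    fmt.Printf("%08b\n", p.flush()) // [00011101]
}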

View File

@ -122,6 +122,16 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
 	return total
 }

+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		if f != 0 {
+			total += int(h.codes[f].len)
+		}
+	}
+	return total
+}
+
 // Return the number of literals assigned to each bit size in the Huffman encoding
 //
 // This method is only called when list.length >= 3
@ -327,37 +337,40 @@ func atLeastOne(v float32) float32 {
 	return v
 }

+// Unassigned values are assigned '1' in the histogram.
+func fillHist(b []uint16) {
+	for i, v := range b {
+		if v == 0 {
+			b[i] = 1
+		}
+	}
+}
+
 // histogramSize accumulates a histogram of b in h.
 // An estimated size in bits is returned.
-// Unassigned values are assigned '1' in the histogram.
 // len(h) must be >= 256, and h's elements must be all zeroes.
-func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
+func histogramSize(b []byte, h []uint16, fill bool) (bits int) {
 	h = h[:256]
 	for _, t := range b {
 		h[t]++
 	}
-	invTotal := 1.0 / float32(len(b))
-	shannon := float32(0.0)
-	var extra float32
+	total := len(b)
 	if fill {
-		oneBits := atLeastOne(-mFastLog2(invTotal))
-		for i, v := range h[:] {
-			if v > 0 {
-				n := float32(v)
-				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
-			} else {
-				h[i] = 1
-				extra += oneBits
-			}
-		}
-	} else {
-		for _, v := range h[:] {
-			if v > 0 {
-				n := float32(v)
-				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+		for _, v := range h {
+			if v == 0 {
+				total++
 			}
 		}
 	}
-	return int(shannon + 0.99), int(extra + 0.99)
+	invTotal := 1.0 / float32(total)
+	shannon := float32(0.0)
+	for _, v := range h {
+		if v > 0 {
+			n := float32(v)
+			shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+		}
+	}
+	return int(shannon + 0.99)
 }
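The reworked histogramSize is a Shannon-entropy estimate: a symbol seen n times out of total contributes n * -log2(n/total) bits, floored at one bit per occurrence (atLeastOne), with fill pretending every unseen symbol occurred once. A standalone sketch of the same arithmetic using the standard math package instead of the vendored mFastLog2 approximation:

package main

import (
    "bytes"
    "fmt"
    "math"
)

// entropyBits estimates how many bits a Huffman-style coder would need for
// b: each byte value with count n contributes n * -log2(n/total) bits,
// with a floor of one bit per occurrence.
func entropyBits(b []byte) int {
    var hist [256]int
    for _, v := range b {
        hist[v]++
    }
    total := float64(len(b))
    est := 0.0
    for _, n := range hist {
        if n == 0 {
            continue
        }
        perSym := math.Max(1, -math.Log2(float64(n)/total))
        est += float64(n) * perSym
    }
    return int(est + 0.99)
}

func main() {
    uniform := make([]byte, 256)
    for i := range uniform {
        uniform[i] = byte(i)
    }
    fmt.Println(entropyBits(uniform))                       // 2048: 8 bits per byte
    fmt.Println(entropyBits(bytes.Repeat([]byte{'a'}, 16))) // 16: floored at one bit per byte
}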

View File

@ -155,7 +155,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
 			// Store every second hash in-between, but offset by 1.
 			for i := s - l + 2; i < s-5; i += 7 {
-				x := load6432(src, int32(i))
+				x := load6432(src, i)
 				nextHash := hash4u(uint32(x), bTableBits)
 				e.table[nextHash] = tableEntry{offset: e.cur + i}
 				// Skip one

vendor/modules.txt vendored
View File

@ -512,7 +512,7 @@ github.com/hashicorp/packer-plugin-docker/post-processor/docker-import
 github.com/hashicorp/packer-plugin-docker/post-processor/docker-push
 github.com/hashicorp/packer-plugin-docker/post-processor/docker-save
 github.com/hashicorp/packer-plugin-docker/post-processor/docker-tag
-# github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407132324-af39c7839daf
+# github.com/hashicorp/packer-plugin-sdk v0.1.3
 ## explicit
 github.com/hashicorp/packer-plugin-sdk/acctest
 github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc
@ -554,6 +554,11 @@ github.com/hashicorp/packer-plugin-sdk/tmp
 github.com/hashicorp/packer-plugin-sdk/useragent
 github.com/hashicorp/packer-plugin-sdk/uuid
 github.com/hashicorp/packer-plugin-sdk/version
+# github.com/hashicorp/packer-plugin-vagrant v0.0.1 => /Users/mmarsh/Projects/packer-plugin-vagrant
+## explicit
+github.com/hashicorp/packer-plugin-vagrant/builder/vagrant
+github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant
+github.com/hashicorp/packer-plugin-vagrant/post-processor/vagrant-cloud
 # github.com/hashicorp/serf v0.9.2
 github.com/hashicorp/serf/coordinate
 # github.com/hashicorp/vault/api v1.0.4
@ -608,7 +613,7 @@ github.com/json-iterator/go
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/klauspost/compress v1.11.7
+# github.com/klauspost/compress v1.11.13
 github.com/klauspost/compress/flate
 # github.com/klauspost/crc32 v1.2.0
 github.com/klauspost/crc32
@ -1167,3 +1172,4 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 gopkg.in/yaml.v3
+# github.com/hashicorp/packer-plugin-vagrant => /Users/mmarsh/Projects/packer-plugin-vagrant
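The `=>` entries above record a local override of the new plugin module; a sketch, not Packer's full go.mod, of the stanza that would produce them while developing against a local checkout (the path is the one from the diff):

module github.com/hashicorp/packer

require (
	github.com/hashicorp/packer-plugin-sdk v0.1.3
	github.com/hashicorp/packer-plugin-vagrant v0.0.1
	github.com/klauspost/compress v1.11.13
)

// Point the plugin at the local working copy while it is being extracted.
replace github.com/hashicorp/packer-plugin-vagrant => /Users/mmarsh/Projects/packer-plugin-vagrant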