Remove driver test utility. Move driver tests to the driver folder

Andrei Tonkikh 2017-11-14 00:02:46 +03:00
parent 7794c149a1
commit 6e3e92e356
11 changed files with 527 additions and 412 deletions

View File

@ -1,12 +1,13 @@
package main
import (
"encoding/json"
builderT "github.com/hashicorp/packer/helper/builder/testing"
"github.com/hashicorp/packer/packer"
"github.com/jetbrains-infra/packer-builder-vsphere/driver"
driverT "github.com/jetbrains-infra/packer-builder-vsphere/driver/testing"
"testing"
builderT "github.com/hashicorp/packer/helper/builder/testing"
"fmt"
"github.com/hashicorp/packer/packer"
"encoding/json"
"math/rand"
"github.com/jetbrains-infra/packer-builder-vsphere/driver"
)
func TestBuilderAcc_default(t *testing.T) {
@ -14,12 +15,7 @@ func TestBuilderAcc_default(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
Template: renderConfig(config),
Check: func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
driverT.VMCheckDefault(t, d, getVM(t, d, artifacts), config["vm_name"].(string),
config["host"].(string), "datastore1")
return nil
},
Check: checkDefault(t, config["vm_name"].(string), config["host"].(string), "datastore1"),
})
}
@ -36,10 +32,69 @@ func defaultConfig() map[string]interface{} {
"ssh_username": "root",
"ssh_password": "jetbrains",
}
config["vm_name"] = driverT.NewVMName()
config["vm_name"] = fmt.Sprintf("test-%v", rand.Intn(1000))
return config
}
func checkDefault(t *testing.T, name string, host string, datastore string) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("name", "parent", "runtime.host", "resourcePool", "datastore", "layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
if vmInfo.Name != name {
t.Errorf("Invalid VM name: expected '%v', got '%v'", name, vmInfo.Name)
}
f := d.NewFolder(vmInfo.Parent)
folderPath, err := f.Path()
if err != nil {
t.Fatalf("Cannot read folder name: %v", err)
}
if folderPath != "" {
t.Errorf("Invalid folder: expected '/', got '%v'", folderPath)
}
h := d.NewHost(vmInfo.Runtime.Host)
hostInfo, err := h.Info("name")
if err != nil {
t.Fatal("Cannot read host properties: ", err)
}
if hostInfo.Name != host {
t.Errorf("Invalid host name: expected '%v', got '%v'", host, hostInfo.Name)
}
p := d.NewResourcePool(vmInfo.ResourcePool)
poolPath, err := p.Path()
if err != nil {
t.Fatalf("Cannot read resource pool name: %v", err)
}
if poolPath != "" {
t.Error("Invalid resource pool: expected '/', got '%v'", poolPath)
}
dsr := vmInfo.Datastore[0].Reference()
ds := d.NewDatastore(&dsr)
dsInfo, err := ds.Info("name")
if err != nil {
t.Fatal("Cannot read datastore properties: ", err)
}
if dsInfo.Name != datastore {
t.Errorf("Invalid datastore name: expected '%v', got '%v'", datastore, dsInfo.Name)
}
if len(vmInfo.LayoutEx.Disk[0].Chain) != 1 {
t.Error("Not a full clone")
}
return nil
}
}
func TestBuilderAcc_artifact(t *testing.T) {
config := defaultConfig()
builderT.Test(t, builderT.TestCase{
@ -80,23 +135,6 @@ func folderConfig() string {
return renderConfig(config)
}
func checkFolder(t *testing.T, folder string) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("parent")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
f := d.NewFolder(vmInfo.Parent)
driverT.CheckFolderPath(t, f, folder)
return nil
}
}
func TestBuilderAcc_resourcePool(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
@ -114,7 +152,7 @@ func resourcePoolConfig() string {
func checkResourcePool(t *testing.T, pool string) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("resourcePool")
@ -122,7 +160,15 @@ func checkResourcePool(t *testing.T, pool string) builderT.TestCheckFunc {
t.Fatalf("Cannot read VM properties: %v", err)
}
driverT.CheckResourcePoolPath(t, d.NewResourcePool(vmInfo.ResourcePool), pool)
p := d.NewResourcePool(vmInfo.ResourcePool)
path, err := p.Path()
if err != nil {
t.Fatalf("Cannot read resource pool name: %v", err)
}
if path != pool {
t.Errorf("Wrong folder. expected: %v, got: %v", pool, path)
}
return nil
}
}
@ -131,11 +177,7 @@ func TestBuilderAcc_datastore(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
Template: datastoreConfig(),
Check: func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
driverT.VMCheckDatastore(t, d, getVM(t, d, artifacts), "datastore1")
return nil
},
Check: checkDatastore(t, "datastore1"), // on esxi-1.vsphere55.test
})
}
@ -145,6 +187,34 @@ func datastoreConfig() string {
return renderConfig(config)
}
func checkDatastore(t *testing.T, name string) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("datastore")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
n := len(vmInfo.Datastore)
if n != 1 {
t.Fatalf("VM should have 1 datastore, got %v", n)
}
ds := d.NewDatastore(&vmInfo.Datastore[0])
info, err := ds.Info("name")
if err != nil {
t.Fatalf("Cannot read datastore properties: %v", err)
}
if info.Name != name {
t.Errorf("Wrong datastore. expected: %v, got: %v", name, info.Name)
}
return nil
}
}
func TestBuilderAcc_multipleDatastores(t *testing.T) {
t.Skip("test must fail")
@ -176,7 +246,7 @@ func linkedCloneConfig() string {
func checkLinkedClone(t *testing.T) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("layoutEx.disk")
@ -196,11 +266,7 @@ func TestBuilderAcc_hardware(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
Template: hardwareConfig(),
Check: func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
driverT.VMCheckHardware(t, d, getVM(t, d, artifacts))
return nil
},
Check: checkHardware(t),
})
}
@ -216,6 +282,45 @@ func hardwareConfig() string {
return renderConfig(config)
}
func checkHardware(t *testing.T) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("config")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
cpuSockets := vmInfo.Config.Hardware.NumCPU
if cpuSockets != 2 {
t.Errorf("VM should have 2 CPU sockets, got %v", cpuSockets)
}
cpuReservation := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Reservation
if cpuReservation != 1000 {
t.Errorf("VM should have CPU reservation for 1000 Mhz, got %v", cpuReservation)
}
cpuLimit := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Limit
if cpuLimit != 1500 {
t.Errorf("VM should have CPU reservation for 1500 Mhz, got %v", cpuLimit)
}
ram := vmInfo.Config.Hardware.MemoryMB
if ram != 2048 {
t.Errorf("VM should have 2048 MB of RAM, got %v", ram)
}
ramReservation := vmInfo.Config.MemoryAllocation.GetResourceAllocationInfo().Reservation
if ramReservation != 1024 {
t.Errorf("VM should have RAM reservation for 1024 MB, got %v", ramReservation)
}
return nil
}
}
func TestBuilderAcc_RAMReservation(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
@ -234,7 +339,7 @@ func RAMReservationConfig() string {
func checkRAMReservation(t *testing.T) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("config")
@ -269,11 +374,7 @@ func TestBuilderAcc_snapshot(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
Template: snapshotConfig(),
Check: func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
driverT.VMCheckSnapshor(t, d, getVM(t, d, artifacts))
return nil
},
Check: checkSnapshot(t),
})
}
@ -283,15 +384,30 @@ func snapshotConfig() string {
return renderConfig(config)
}
func checkSnapshot(t *testing.T) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
layers := len(vmInfo.LayoutEx.Disk[0].Chain)
if layers != 2 {
t.Errorf("VM should have a single snapshot. expected 2 disk layers, got %v", layers)
}
return nil
}
}
func TestBuilderAcc_template(t *testing.T) {
builderT.Test(t, builderT.TestCase{
Builder: &Builder{},
Template: templateConfig(),
Check: func(artifacts []packer.Artifact) error {
d := driverT.NewTestDriver(t)
driverT.VMCheckTemplate(t, d, getVM(t, d, artifacts))
return nil
},
Check: checkTemplate(t),
})
}
@ -302,6 +418,24 @@ func templateConfig() string {
return renderConfig(config)
}
func checkTemplate(t *testing.T) builderT.TestCheckFunc {
return func(artifacts []packer.Artifact) error {
d := testConn(t)
vm := getVM(t, d, artifacts)
vmInfo, err := vm.Info("config.template")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
if !vmInfo.Config.Template {
t.Error("Not a template")
}
return nil
}
}
func renderConfig(config map[string]interface{}) string {
t := map[string][]map[string]interface{}{
"builders": {
@ -318,6 +452,19 @@ func renderConfig(config map[string]interface{}) string {
return string(j)
}
func testConn(t *testing.T) *driver.Driver {
d, err := driver.NewDriver(&driver.ConnectConfig{
VCenterServer: "vcenter.vsphere55.test",
Username: "root",
Password: "jetbrains",
InsecureConnection: true,
})
if err != nil {
t.Fatal("Cannot connect: ", err)
}
return d
}
func getVM(t *testing.T, d *driver.Driver, artifacts []packer.Artifact) *driver.VirtualMachine {
artifactRaw := artifacts[0]
artifact, _ := artifactRaw.(*Artifact)
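For context, the builder acceptance tests above run through Packer's helper/builder/testing harness, which is normally gated by its own environment variable (PACKER_ACC). A hypothetical invocation from the package containing builder_acc_test.go would look roughly like:

    PACKER_ACC=1 go test -v -run TestBuilderAcc .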

View File

@ -0,0 +1,22 @@
package driver
import (
"testing"
)
func TestDatastoreAcc(t *testing.T) {
initDriverAcceptanceTest(t)
d := newTestDriver(t)
ds, err := d.FindDatastore("datastore1")
if err != nil {
t.Fatalf("Cannot find the default datastore '%v': %v", "datastore1", err)
}
info, err := ds.Info("name")
if err != nil {
t.Fatalf("Cannot read datastore properties: %v", err)
}
if info.Name != "datastore1" {
t.Errorf("Wrong datastore. expected: 'datastore1', got: '%v'", info.Name)
}
}

driver/driver_test.go Normal file (46 lines added)
View File

@ -0,0 +1,46 @@
package driver
import (
"os"
"fmt"
"testing"
"time"
"math/rand"
)
// TestEnvVar is the name of the environment variable that enables the driver acceptance tests
const TestEnvVar = "VSPHERE_DRIVER_ACC"
const hostName = "esxi-1.vsphere55.test"
func newTestDriver(t *testing.T) *Driver {
d, err := NewDriver(&ConnectConfig{
VCenterServer: "vcenter.vsphere55.test",
Username: "root",
Password: "jetbrains",
InsecureConnection: true,
})
if err != nil {
t.Fatalf("Cannot connect: %v", err)
}
return d
}
func newVMName() string {
rand.Seed(time.Now().UTC().UnixNano())
return fmt.Sprintf("test-%v", rand.Intn(1000))
}
func initDriverAcceptanceTest(t *testing.T) {
// We only run acceptance tests if an env var is set because they're
// slow and require outside configuration.
if os.Getenv(TestEnvVar) == "" {
t.Skip(fmt.Sprintf(
"Acceptance tests skipped unless env '%s' set",
TestEnvVar))
}
// We require verbose mode so that the user knows what is going on.
if !testing.Verbose() {
t.Fatal("Acceptance tests must be run with the -v flag on tests")
}
}
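Given the gating above, a typical way to run the relocated driver acceptance tests (a sketch, assuming the vcenter.vsphere55.test lab from newTestDriver is reachable and the command is run from the repository root) would be:

    VSPHERE_DRIVER_ACC=1 go test -v ./driver/ -run 'TestVMAcc|TestDatastoreAcc|TestFolderAcc|TestHostAcc|TestResourcePoolAcc'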

driver/folder_acc_test.go Normal file (20 lines added)
View File

@ -0,0 +1,20 @@
package driver
import "testing"
func TestFolderAcc(t *testing.T) {
initDriverAcceptanceTest(t)
d := newTestDriver(t)
f, err := d.FindFolder("folder1/folder2")
if err != nil {
t.Fatalf("Cannot find the default folder '%v': %v", "folder1/folder2", err)
}
path, err := f.Path()
if err != nil {
t.Fatalf("Cannot read folder name: %v", err)
}
if path != "folder1/folder2" {
t.Errorf("Wrong folder. expected: 'folder1/folder2', got: '%v'", path)
}
}

View File

@ -1,4 +1,4 @@
package testing
package driver
import (
"testing"
@ -6,17 +6,18 @@ import (
func TestHostAcc(t *testing.T) {
initDriverAcceptanceTest(t)
hostName := "esxi-1.vsphere55.test"
d := NewTestDriver(t)
d := newTestDriver(t)
host, err := d.FindHost(hostName)
if err != nil {
t.Fatalf("Cannot find the default host '%v': %v", "datastore1", err)
}
switch info, err := host.Info("name"); {
case err != nil:
t.Errorf("Cannot read host properties: %v", err)
case info.Name != hostName:
info, err := host.Info("name")
if err != nil {
t.Fatalf("Cannot read host properties: %v", err)
}
if info.Name != hostName {
t.Errorf("Wrong host name: expected '%v', got: '%v'", hostName, info.Name)
}
}

View File

@ -1,14 +1,21 @@
package testing
package driver
import "testing"
func TestResourcePoolAcc(t *testing.T) {
initDriverAcceptanceTest(t)
d := NewTestDriver(t)
d := newTestDriver(t)
p, err := d.FindResourcePool("esxi-1.vsphere55.test", "pool1/pool2")
if err != nil {
t.Fatalf("Cannot find the default resource pool '%v': %v", "pool1/pool2", err)
}
CheckResourcePoolPath(t, p, "pool1/pool2")
path, err := p.Path()
if err != nil {
t.Fatalf("Cannot read resource pool name: %v", err)
}
if path != "pool1/pool2" {
t.Errorf("Wrong folder. expected: 'pool1/pool2', got: '%v'", path)
}
}

View File

@ -1,16 +0,0 @@
package testing
import (
"testing"
)
func TestDatastoreAcc(t *testing.T) {
initDriverAcceptanceTest(t)
d := NewTestDriver(t)
ds, err := d.FindDatastore("datastore1")
if err != nil {
t.Fatalf("Cannot find the default datastore '%v': %v", "datastore1", err)
}
CheckDatastoreName(t, ds, "datastore1")
}

View File

@ -1,14 +0,0 @@
package testing
import "testing"
func TestFolderAcc(t *testing.T) {
initDriverAcceptanceTest(t)
d := NewTestDriver(t)
f, err := d.FindFolder("folder1/folder2")
if err != nil {
t.Fatalf("Cannot find the default folder '%v': %v", "folder1/folder2", err)
}
CheckFolderPath(t, f, "folder1/folder2")
}

View File

@ -1,191 +0,0 @@
package testing
import (
"fmt"
"github.com/jetbrains-infra/packer-builder-vsphere/driver"
"math/rand"
"os"
"testing"
"time"
)
func NewTestDriver(t *testing.T) *driver.Driver {
d, err := driver.NewDriver(&driver.ConnectConfig{
VCenterServer: TestVCenterServer,
Username: TestVCenterUsername,
Password: TestVCenterPassword,
InsecureConnection: true,
})
if err != nil {
t.Fatalf("Cannot connect: %v", err)
}
return d
}
func NewVMName() string {
rand.Seed(time.Now().UTC().UnixNano())
return fmt.Sprintf("test-%v", rand.Intn(1000))
}
func CheckDatastoreName(t *testing.T, ds *driver.Datastore, datastore string) {
switch info, err := ds.Info("name"); {
case err != nil:
t.Errorf("Cannot read datastore properties: %v", err)
case info.Name != datastore:
t.Errorf("Wrong name: expected '%v', got: '%v'", datastore, info.Name)
}
}
func CheckResourcePoolPath(t *testing.T, p *driver.ResourcePool, pool string) {
switch path, err := p.Path(); {
case err != nil:
t.Errorf("Cannot read resource pool path: %v", err)
case path != pool:
t.Errorf("Wrong name: expected '%v', got: '%v'", pool, path)
}
}
func CheckFolderPath(t *testing.T, f *driver.Folder, folder string) {
switch path, err := f.Path(); {
case err != nil:
t.Fatalf("Cannot read folder path: %v", err)
case path != folder:
t.Errorf("Wrong path: expected '%v', got: '%v'", folder, path)
}
}
func initDriverAcceptanceTest(t *testing.T) {
// We only run acceptance tests if an env var is set because they're
// slow and require outside configuration.
if os.Getenv(TestEnvVar) == "" {
t.Skip(fmt.Sprintf(
"Acceptance tests skipped unless env '%s' set",
TestEnvVar))
}
// We require verbose mode so that the user knows what is going on.
if !testing.Verbose() {
t.Fatal("Acceptance tests must be run with the -v flag on tests")
}
}
func VMCheckDefault(t *testing.T, d *driver.Driver, vm *driver.VirtualMachine,
name string, host string, datastore string) {
vmInfo, err := vm.Info("name", "parent", "runtime.host", "resourcePool", "datastore", "layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
if vmInfo.Name != name {
t.Errorf("Invalid VM name: expected '%v', got '%v'", name, vmInfo.Name)
}
f := d.NewFolder(vmInfo.Parent)
switch folderPath, err := f.Path(); {
case err != nil:
t.Errorf("Cannot read folder name: %v", err)
case folderPath != "":
t.Errorf("Invalid folder: expected '/', got '%v'", folderPath)
}
h := d.NewHost(vmInfo.Runtime.Host)
switch hostInfo, err := h.Info("name"); {
case err != nil:
t.Errorf("Cannot read host properties: %v", err)
case hostInfo.Name != host:
t.Errorf("Invalid host name: expected '%v', got '%v'", host, hostInfo.Name)
}
p := d.NewResourcePool(vmInfo.ResourcePool)
switch poolPath, err := p.Path(); {
case err != nil:
t.Errorf("Cannot read resource pool name: %v", err)
case poolPath != "":
t.Error("Invalid resource pool: expected '/', got '%v'", poolPath)
}
dsr := vmInfo.Datastore[0].Reference()
ds := d.NewDatastore(&dsr)
switch dsInfo, err := ds.Info("name"); {
case err != nil:
t.Errorf("Cannot read datastore properties: %v", err)
case dsInfo.Name != datastore:
t.Errorf("Invalid datastore name: expected '%v', got '%v'", datastore, dsInfo.Name)
}
if len(vmInfo.LayoutEx.Disk[0].Chain) != 1 {
t.Error("Not a full clone")
}
}
func VMCheckHardware(t* testing.T, d *driver.Driver, vm *driver.VirtualMachine) {
vmInfo, err := vm.Info("config")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
cpuSockets := vmInfo.Config.Hardware.NumCPU
if cpuSockets != 2 {
t.Errorf("VM should have 2 CPU sockets, got %v", cpuSockets)
}
cpuReservation := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Reservation
if cpuReservation != 1000 {
t.Errorf("VM should have CPU reservation for 1000 Mhz, got %v", cpuReservation)
}
cpuLimit := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Limit
if cpuLimit != 1500 {
t.Errorf("VM should have CPU reservation for 1500 Mhz, got %v", cpuLimit)
}
ram := vmInfo.Config.Hardware.MemoryMB
if ram != 2048 {
t.Errorf("VM should have 2048 MB of RAM, got %v", ram)
}
ramReservation := vmInfo.Config.MemoryAllocation.GetResourceAllocationInfo().Reservation
if ramReservation != 1024 {
t.Errorf("VM should have RAM reservation for 1024 MB, got %v", ramReservation)
}
}
func VMCheckTemplate(t* testing.T, d *driver.Driver, vm *driver.VirtualMachine) {
vmInfo, err := vm.Info("config.template")
switch {
case err != nil:
t.Errorf("Cannot read VM properties: %v", err)
case !vmInfo.Config.Template:
t.Error("Not a template")
}
}
func VMCheckDatastore(t* testing.T, d *driver.Driver, vm *driver.VirtualMachine, name string) {
vmInfo, err := vm.Info("datastore")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
n := len(vmInfo.Datastore)
if n != 1 {
t.Fatalf("VM should have 1 datastore, got %v", n)
}
ds := d.NewDatastore(&vmInfo.Datastore[0])
CheckDatastoreName(t, ds, name)
}
func VMCheckSnapshor(t* testing.T, d *driver.Driver, vm *driver.VirtualMachine) {
vmInfo, err := vm.Info("layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
layers := len(vmInfo.LayoutEx.Disk[0].Chain)
if layers != 2 {
t.Errorf("VM should have a single snapshot. expected 2 disk layers, got %v", layers)
}
}

View File

@ -1,128 +0,0 @@
package testing
import (
"github.com/jetbrains-infra/packer-builder-vsphere/driver"
"log"
"testing"
"net"
"time"
)
func initVMAccTest(t *testing.T) (d *driver.Driver, vm *driver.VirtualMachine, vmName string, vmDestructor func()) {
initDriverAcceptanceTest(t)
templateName := "alpine"
d = NewTestDriver(t)
template, err := d.FindVM(templateName) // Don't destroy this VM!
if err != nil {
t.Fatalf("Cannot find template vm '%v': %v", templateName, err)
}
log.Printf("[DEBUG] Clonning VM")
vmName = NewVMName()
vm, err = template.Clone(&driver.CloneConfig{
Name: vmName,
Host: "esxi-1.vsphere55.test",
})
if err != nil {
t.Fatalf("Cannot clone vm '%v': %v", templateName, err)
}
vmDestructor = func() {
log.Printf("[DEBUG] Removing the clone")
if err := vm.Destroy(); err != nil {
t.Errorf("!!! ERROR REMOVING VM '%v': %v!!!", vmName, err)
}
// Check that the clone is no longer exists
if _, err := d.FindVM(vmName); err == nil {
t.Errorf("!!! STILL CAN FIND VM '%v'. IT MIGHT NOT HAVE BEEN DELETED !!!", vmName)
}
}
return
}
func TestVMAcc_default(t *testing.T) {
d, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
// Check that the clone can be found by its name
if _, err := d.FindVM(vmName); err != nil {
t.Errorf("Cannot find created vm '%v': %v", vmName, err)
}
// Run checks
log.Printf("[DEBUG] Running check function")
VMCheckDefault(t, d, vm, vmName, "esxi-1.vsphere55.test", "datastore1")
}
func TestVMAcc_hardware(t *testing.T) {
d, vm, _ /*vmName*/, vmDestructor := initVMAccTest(t)
defer vmDestructor()
log.Printf("[DEBUG] Configuring the vm")
vm.Configure(&driver.HardwareConfig{
CPUs: 2,
CPUReservation: 1000,
CPULimit: 1500,
RAM: 2048,
RAMReservation: 1024,
})
log.Printf("[DEBUG] Running check function")
VMCheckHardware(t, d, vm)
}
func startVM(t *testing.T, vm *driver.VirtualMachine, vmName string) (stopper func()) {
log.Printf("[DEBUG] Starting the vm")
if err := vm.PowerOn(); err != nil {
t.Fatalf("Cannot start created vm '%v': %v", vmName, err)
}
return func() {
log.Printf("[DEBUG] Powering off the vm")
if err := vm.PowerOff(); err != nil {
t.Errorf("Cannot power off started vm '%v': %v", vmName, err)
}
}
}
func TestVMAcc_running(t *testing.T) {
_ /*d*/, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
stopper := startVM(t, vm, vmName)
defer stopper()
switch ip, err := vm.WaitForIP(); {
case err != nil:
t.Errorf("Cannot obtain IP address from created vm '%v': %v", vmName, err)
case net.ParseIP(ip) == nil:
t.Errorf("'%v' is not a valid ip address", ip)
}
vm.StartShutdown()
log.Printf("[DEBUG] Waiting max 1m0s for shutdown to complete")
// TODO: there is complex logic in WaitForShutdown. It's better to test it well. It might be reasonable to create
// unit tests for it.
vm.WaitForShutdown(1 * time.Minute)
}
func TestVMAcc_running_snapshot(t *testing.T) {
d, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
stopper := startVM(t, vm, vmName)
defer stopper()
vm.CreateSnapshot("test-snapshot")
VMCheckSnapshor(t, d, vm)
}
func TestVMAcc_template(t *testing.T) {
d, vm, _ /*vmName*/, vmDestructor := initVMAccTest(t)
defer vmDestructor()
vm.ConvertToTemplate()
VMCheckTemplate(t, d, vm)
}

driver/vm_acc_test.go Normal file (221 lines added)
View File

@ -0,0 +1,221 @@
package driver
import (
"log"
"testing"
"net"
"time"
)
func initVMAccTest(t *testing.T) (d *Driver, vm *VirtualMachine, vmName string, vmDestructor func()) {
initDriverAcceptanceTest(t)
templateName := "alpine"
d = newTestDriver(t)
template, err := d.FindVM(templateName) // Don't destroy this VM!
if err != nil {
t.Fatalf("Cannot find template vm '%v': %v", templateName, err)
}
log.Printf("[DEBUG] Clonning VM")
vmName = newVMName()
vm, err = template.Clone(&CloneConfig{
Name: vmName,
Host: hostName,
})
if err != nil {
t.Fatalf("Cannot clone vm '%v': %v", templateName, err)
}
vmDestructor = func() {
log.Printf("[DEBUG] Removing the clone")
if err := vm.Destroy(); err != nil {
t.Errorf("!!! ERROR REMOVING VM '%v': %v!!!", vmName, err)
}
// Check that the clone no longer exists
if _, err := d.FindVM(vmName); err == nil {
t.Errorf("!!! STILL CAN FIND VM '%v'. IT MIGHT NOT HAVE BEEN DELETED !!!", vmName)
}
}
return
}
func TestVMAcc_default(t *testing.T) {
d, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
// Check that the clone can be found by its name
if _, err := d.FindVM(vmName); err != nil {
t.Errorf("Cannot find created vm '%v': %v", vmName, err)
}
// Run checks
log.Printf("[DEBUG] Running checks")
vmInfo, err := vm.Info("name", "parent", "runtime.host", "resourcePool", "datastore", "layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
if vmInfo.Name != vmName {
t.Errorf("Invalid VM name: expected '%v', got '%v'", vmName, vmInfo.Name)
}
f := d.NewFolder(vmInfo.Parent)
folderPath, err := f.Path()
if err != nil {
t.Fatalf("Cannot read folder name: %v", err)
}
if folderPath != "" {
t.Errorf("Invalid folder: expected '/', got '%v'", folderPath)
}
h := d.NewHost(vmInfo.Runtime.Host)
hostInfo, err := h.Info("name")
if err != nil {
t.Fatal("Cannot read host properties: ", err)
}
if hostInfo.Name != hostName {
t.Errorf("Invalid host name: expected '%v', got '%v'", hostName, hostInfo.Name)
}
p := d.NewResourcePool(vmInfo.ResourcePool)
poolPath, err := p.Path()
if err != nil {
t.Fatalf("Cannot read resource pool name: %v", err)
}
if poolPath != "" {
t.Error("Invalid resource pool: expected '/', got '%v'", poolPath)
}
dsr := vmInfo.Datastore[0].Reference()
ds := d.NewDatastore(&dsr)
dsInfo, err := ds.Info("name")
if err != nil {
t.Fatal("Cannot read datastore properties: ", err)
}
if dsInfo.Name != "datastore1" {
t.Errorf("Invalid datastore name: expected '%v', got '%v'", "datastore1", dsInfo.Name)
}
if len(vmInfo.LayoutEx.Disk[0].Chain) != 1 {
t.Error("Not a full clone")
}
}
func TestVMAcc_folder(t *testing.T) {
}
func TestVMAcc_hardware(t *testing.T) {
_ /*d*/, vm, _ /*vmName*/, vmDestructor := initVMAccTest(t)
defer vmDestructor()
log.Printf("[DEBUG] Configuring the vm")
config := &HardwareConfig{
CPUs: 2,
CPUReservation: 1000,
CPULimit: 1500,
RAM: 2048,
RAMReservation: 1024,
}
vm.Configure(config)
log.Printf("[DEBUG] Running checks")
vmInfo, err := vm.Info("config")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
cpuSockets := vmInfo.Config.Hardware.NumCPU
if cpuSockets != config.CPUs {
t.Errorf("VM should have %v CPU sockets, got %v", config.CPUs, cpuSockets)
}
cpuReservation := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Reservation
if cpuReservation != config.CPUReservation {
t.Errorf("VM should have CPU reservation for %v Mhz, got %v", config.CPUReservation, cpuReservation)
}
cpuLimit := vmInfo.Config.CpuAllocation.GetResourceAllocationInfo().Limit
if cpuLimit != config.CPULimit {
t.Errorf("VM should have CPU reservation for %v Mhz, got %v", config.CPULimit, cpuLimit)
}
ram := vmInfo.Config.Hardware.MemoryMB
if int64(ram) != config.RAM {
t.Errorf("VM should have %v MB of RAM, got %v", config.RAM, ram)
}
ramReservation := vmInfo.Config.MemoryAllocation.GetResourceAllocationInfo().Reservation
if ramReservation != config.RAMReservation {
t.Errorf("VM should have RAM reservation for %v MB, got %v", config.RAMReservation, ramReservation)
}
}
func startVM(t *testing.T, vm *VirtualMachine, vmName string) (stopper func()) {
log.Printf("[DEBUG] Starting the vm")
if err := vm.PowerOn(); err != nil {
t.Fatalf("Cannot start created vm '%v': %v", vmName, err)
}
return func() {
log.Printf("[DEBUG] Powering off the vm")
if err := vm.PowerOff(); err != nil {
t.Errorf("Cannot power off started vm '%v': %v", vmName, err)
}
}
}
func TestVMAcc_running(t *testing.T) {
_ /*d*/, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
stopper := startVM(t, vm, vmName)
defer stopper()
switch ip, err := vm.WaitForIP(); {
case err != nil:
t.Errorf("Cannot obtain IP address from created vm '%v': %v", vmName, err)
case net.ParseIP(ip) == nil:
t.Errorf("'%v' is not a valid ip address", ip)
}
vm.StartShutdown()
log.Printf("[DEBUG] Waiting max 1m0s for shutdown to complete")
vm.WaitForShutdown(1 * time.Minute)
}
func TestVMAcc_snapshot(t *testing.T) {
_ /*d*/, vm, vmName, vmDestructor := initVMAccTest(t)
defer vmDestructor()
stopper := startVM(t, vm, vmName)
defer stopper()
vm.CreateSnapshot("test-snapshot")
vmInfo, err := vm.Info("layoutEx.disk")
if err != nil {
t.Fatalf("Cannot read VM properties: %v", err)
}
layers := len(vmInfo.LayoutEx.Disk[0].Chain)
if layers != 2 {
t.Errorf("VM should have a single snapshot. expected 2 disk layers, got %v", layers)
}
}
func TestVMAcc_template(t *testing.T) {
_ /*d*/, vm, _ /*vmName*/, vmDestructor := initVMAccTest(t)
defer vmDestructor()
vm.ConvertToTemplate()
vmInfo, err := vm.Info("config.template")
if err != nil {
t.Errorf("Cannot read VM properties: %v", err)
} else if !vmInfo.Config.Template {
t.Error("Not a template")
}
}