/*
Copyright (c) 2017 VMware, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package simulator

import (
	"context"
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"reflect"

	"github.com/google/uuid"
	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/simulator/esx"
	"github.com/vmware/govmomi/simulator/vpx"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
	"github.com/vmware/govmomi/vim25/xml"
)

// DelayConfig tunes the artificial latency the simulator adds when serving SOAP calls.
type DelayConfig struct {
	// Delay specifies the number of milliseconds to delay serving a SOAP call. 0 means no delay.
	// This can be used to simulate a poorly performing vCenter or network lag.
	Delay int

	// MethodDelay specifies the number of milliseconds to delay serving a specific method.
	// Each entry in the map is the name of a method and its associated delay in milliseconds.
	// This can be used to simulate a poorly performing vCenter or network lag.
	MethodDelay map[string]int

	// DelayJitter defines the delay jitter as a coefficient of variation (stddev/mean).
	// This can be used to simulate unpredictable delay. 0 means no jitter, i.e. all invocations get the same delay.
	DelayJitter float64
}
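
// Illustrative sketch (not part of the original source): one way a test could
// configure these delays. The values and the "PowerOnVM_Task" method key below
// are example assumptions, not defaults.
//
//	model := simulator.VPX()
//	model.DelayConfig = simulator.DelayConfig{
//		Delay:       100, // ~100ms added to every call
//		DelayJitter: 0.5, // stddev = 50% of the mean delay
//		MethodDelay: map[string]int{"PowerOnVM_Task": 500},
//	}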

// Model is used to populate a simulator with an initial set of managed entities.
// This is a simple helper for tests running against a simulator, to populate an inventory
// with commonly used models.
// The inventory names generated by a Model have a string prefix per-type and an integer suffix per-instance.
// The names are concatenated with their ancestor names and delimited by '_', making the generated names unique.
type Model struct {
	Service *Service `json:"-"`

	ServiceContent types.ServiceContent `json:"-"`
	RootFolder     mo.Folder            `json:"-"`

	// Autostart will power on Model created VMs when true
	Autostart bool `json:"-"`

	// Datacenter specifies the number of Datacenter entities to create
	// Name prefix: DC, vcsim flag: -dc
	Datacenter int

	// Portgroup specifies the number of DistributedVirtualPortgroup entities to create per Datacenter
	// Name prefix: DVPG, vcsim flag: -pg
	Portgroup int

	// PortgroupNSX specifies the number of NSX backed DistributedVirtualPortgroup entities to create per Datacenter
	// Name prefix: NSXPG, vcsim flag: -nsx-pg
	PortgroupNSX int

	// OpaqueNetwork specifies the number of OpaqueNetwork entities to create per Datacenter,
	// with Summary.OpaqueNetworkType set to nsx.LogicalSwitch and Summary.OpaqueNetworkId to a random uuid.
	// Name prefix: NSX, vcsim flag: -nsx
	OpaqueNetwork int

	// Host specifies the number of standalone HostSystem entities to create per Datacenter
	// Name prefix: H, vcsim flag: -standalone-host
	Host int `json:",omitempty"`

	// Cluster specifies the number of ClusterComputeResource entities to create per Datacenter
	// Name prefix: C, vcsim flag: -cluster
	Cluster int

	// ClusterHost specifies the number of HostSystem entities to create within a Cluster
	// Name prefix: H, vcsim flag: -host
	ClusterHost int `json:",omitempty"`

	// Pool specifies the number of ResourcePool entities to create per Cluster.
	// Note that every cluster has a root ResourcePool named "Resources", as real vCenter does.
	// For example: /DC0/host/DC0_C0/Resources
	// The root ResourcePool is named "RP0" within other object names.
	// When Model.Pool is set to 1 or higher, this creates child ResourcePools under the root pool.
	// For example: /DC0/host/DC0_C0/Resources/DC0_C0_RP1
	// Name prefix: RP, vcsim flag: -pool
	Pool int

	// Datastore specifies the number of Datastore entities to create.
	// Each Datastore will have temporary local file storage and will be mounted
	// on every HostSystem created by the Model.
	// Name prefix: LocalDS, vcsim flag: -ds
	Datastore int

	// Machine specifies the number of VirtualMachine entities to create per ResourcePool
	// Name prefix: VM, vcsim flag: -vm
	Machine int

	// Folder specifies the number of Datacenters to place within a Folder.
	// This includes a folder for the Datacenter itself and its host, vm, network and datastore folders.
	// All resources for the Datacenter are placed within these folders, rather than the top-level folders.
	// Name prefix: F, vcsim flag: -folder
	Folder int

	// App specifies the number of VirtualApp entities to create per Cluster
	// Name prefix: APP, vcsim flag: -app
	App int

	// Pod specifies the number of StoragePod entities to create per Cluster
	// Name prefix: POD, vcsim flag: -pod
	Pod int

	// DelayConfig specifies the SOAP call delay configuration, see DelayConfig.
	DelayConfig DelayConfig `json:"-"`

	// total number of inventory objects, set by Count()
	total int

	dirs []string
}
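
// For orientation (illustrative, not part of the original source): with the
// default VPX() model below, the generated inventory follows the prefix/suffix
// scheme described above, e.g. a Datacenter "DC0", a cluster "DC0_C0", its
// hosts "DC0_C0_H0".."DC0_C0_H2", and VMs such as "DC0_C0_RP0_VM0". The exact
// set of names depends on the Model fields.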

// ESX is the default Model for a standalone ESX instance
func ESX() *Model {
	return &Model{
		ServiceContent: esx.ServiceContent,
		RootFolder:     esx.RootFolder,
		Autostart:      true,
		Datastore:      1,
		Machine:        2,
		DelayConfig: DelayConfig{
			Delay:       0,
			DelayJitter: 0,
			MethodDelay: nil,
		},
	}
}

// VPX is the default Model for a vCenter instance
func VPX() *Model {
	return &Model{
		ServiceContent: vpx.ServiceContent,
		RootFolder:     vpx.RootFolder,
		Autostart:      true,
		Datacenter:     1,
		Portgroup:      1,
		Host:           1,
		Cluster:        1,
		ClusterHost:    3,
		Datastore:      1,
		Machine:        2,
		DelayConfig: DelayConfig{
			Delay:       0,
			DelayJitter: 0,
			MethodDelay: nil,
		},
	}
}
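
// A minimal usage sketch (not part of the original source): tweak the default
// VPX model, build the inventory and serve it. The overridden field values are
// example choices only.
//
//	model := simulator.VPX()
//	model.Datacenter = 2
//	model.Machine = 5
//
//	if err := model.Create(); err != nil {
//		log.Fatal(err)
//	}
//	defer model.Remove()
//
//	s := model.Service.NewServer()
//	defer s.Close()
//	fmt.Println(s.URL) // connect a govmomi client to this endpoint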

// Count returns a Model with the total number of each existing entity type
func (m *Model) Count() Model {
	count := Model{}

	for ref, obj := range Map.objects {
		if _, ok := obj.(mo.Entity); !ok {
			continue
		}

		count.total++

		switch ref.Type {
		case "Datacenter":
			count.Datacenter++
		case "DistributedVirtualPortgroup":
			count.Portgroup++
		case "ClusterComputeResource":
			count.Cluster++
		case "Datastore":
			count.Datastore++
		case "HostSystem":
			count.Host++
		case "VirtualMachine":
			count.Machine++
		case "ResourcePool":
			count.Pool++
		case "VirtualApp":
			count.App++
		case "Folder":
			count.Folder++
		case "StoragePod":
			count.Pod++
		case "OpaqueNetwork":
			count.OpaqueNetwork++
		}
	}

	return count
}
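
// For example (sketch, not part of the original source), a test can compare
// the live inventory against its expectations after Create():
//
//	count := model.Count()
//	fmt.Println(count.Machine, count.Host) // totals of VirtualMachine and HostSystem objects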

func (*Model) fmtName(prefix string, num int) string {
	return fmt.Sprintf("%s%d", prefix, num)
}

// kinds maps managed object types to their vcsim wrapper types
var kinds = map[string]reflect.Type{
	"AuthorizationManager":           reflect.TypeOf((*AuthorizationManager)(nil)).Elem(),
	"ClusterComputeResource":         reflect.TypeOf((*ClusterComputeResource)(nil)).Elem(),
	"CustomFieldsManager":            reflect.TypeOf((*CustomFieldsManager)(nil)).Elem(),
	"CustomizationSpecManager":       reflect.TypeOf((*CustomizationSpecManager)(nil)).Elem(),
	"Datacenter":                     reflect.TypeOf((*Datacenter)(nil)).Elem(),
	"Datastore":                      reflect.TypeOf((*Datastore)(nil)).Elem(),
	"DistributedVirtualPortgroup":    reflect.TypeOf((*DistributedVirtualPortgroup)(nil)).Elem(),
	"DistributedVirtualSwitch":       reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem(),
	"EnvironmentBrowser":             reflect.TypeOf((*EnvironmentBrowser)(nil)).Elem(),
	"EventManager":                   reflect.TypeOf((*EventManager)(nil)).Elem(),
	"FileManager":                    reflect.TypeOf((*FileManager)(nil)).Elem(),
	"Folder":                         reflect.TypeOf((*Folder)(nil)).Elem(),
	"GuestOperationsManager":         reflect.TypeOf((*GuestOperationsManager)(nil)).Elem(),
	"HostDatastoreBrowser":           reflect.TypeOf((*HostDatastoreBrowser)(nil)).Elem(),
	"HostLocalAccountManager":        reflect.TypeOf((*HostLocalAccountManager)(nil)).Elem(),
	"HostNetworkSystem":              reflect.TypeOf((*HostNetworkSystem)(nil)).Elem(),
	"HostSystem":                     reflect.TypeOf((*HostSystem)(nil)).Elem(),
	"IpPoolManager":                  reflect.TypeOf((*IpPoolManager)(nil)).Elem(),
	"LicenseManager":                 reflect.TypeOf((*LicenseManager)(nil)).Elem(),
	"OptionManager":                  reflect.TypeOf((*OptionManager)(nil)).Elem(),
	"OvfManager":                     reflect.TypeOf((*OvfManager)(nil)).Elem(),
	"PerformanceManager":             reflect.TypeOf((*PerformanceManager)(nil)).Elem(),
	"PropertyCollector":              reflect.TypeOf((*PropertyCollector)(nil)).Elem(),
	"ResourcePool":                   reflect.TypeOf((*ResourcePool)(nil)).Elem(),
	"SearchIndex":                    reflect.TypeOf((*SearchIndex)(nil)).Elem(),
	"SessionManager":                 reflect.TypeOf((*SessionManager)(nil)).Elem(),
	"StoragePod":                     reflect.TypeOf((*StoragePod)(nil)).Elem(),
	"StorageResourceManager":         reflect.TypeOf((*StorageResourceManager)(nil)).Elem(),
	"TaskManager":                    reflect.TypeOf((*TaskManager)(nil)).Elem(),
	"UserDirectory":                  reflect.TypeOf((*UserDirectory)(nil)).Elem(),
	"VcenterVStorageObjectManager":   reflect.TypeOf((*VcenterVStorageObjectManager)(nil)).Elem(),
	"ViewManager":                    reflect.TypeOf((*ViewManager)(nil)).Elem(),
	"VirtualApp":                     reflect.TypeOf((*VirtualApp)(nil)).Elem(),
	"VirtualDiskManager":             reflect.TypeOf((*VirtualDiskManager)(nil)).Elem(),
	"VirtualMachine":                 reflect.TypeOf((*VirtualMachine)(nil)).Elem(),
	"VmwareDistributedVirtualSwitch": reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem(),
}

func loadObject(content types.ObjectContent) (mo.Reference, error) {
	var obj mo.Reference
	id := content.Obj

	kind, ok := kinds[id.Type]
	if ok {
		obj = reflect.New(kind).Interface().(mo.Reference)
	}

	if obj == nil {
		// No vcsim wrapper for this type, e.g. IoFilterManager
		x, err := mo.ObjectContentToType(content, true)
		if err != nil {
			return nil, err
		}
		obj = x.(mo.Reference)
	} else {
		if len(content.PropSet) == 0 {
			// via NewServiceInstance()
			Map.setReference(obj, id)
		} else {
			// via Model.Load()
			dst := getManagedObject(obj).Addr().Interface().(mo.Reference)
			err := mo.LoadObjectContent([]types.ObjectContent{content}, dst)
			if err != nil {
				return nil, err
			}
		}

		if x, ok := obj.(interface{ init(*Registry) }); ok {
			x.init(Map)
		}
	}

	return obj, nil
}

// resolveReferences attempts to resolve any object references that were not included via Load().
// For example, if Load's dir only contains a single OpaqueNetwork, we need to create a Datacenter
// and place the OpaqueNetwork in the Datacenter's network folder.
func (m *Model) resolveReferences(ctx *Context) error {
	dc, ok := Map.Any("Datacenter").(*Datacenter)
	if !ok {
		// Need to have at least 1 Datacenter
		root := Map.Get(Map.content().RootFolder).(*Folder)
		ref := root.CreateDatacenter(internalContext, &types.CreateDatacenter{
			This: root.Self,
			Name: "DC0",
		}).(*methods.CreateDatacenterBody).Res.Returnval
		dc = Map.Get(ref).(*Datacenter)
	}

	for ref, val := range Map.objects {
		me, ok := val.(mo.Entity)
		if !ok {
			continue
		}
		e := me.Entity()
		if e.Parent == nil || ref.Type == "Folder" {
			continue
		}
		if Map.Get(*e.Parent) == nil {
			// object was loaded without its parent, attempt to foster with another parent
			switch e.Parent.Type {
			case "Folder":
				folder := dc.folder(me)
				e.Parent = &folder.Self
				log.Printf("%s adopted %s", e.Parent, ref)
				folderPutChild(ctx, folder, me)
			default:
				return fmt.Errorf("unable to foster %s with parent type=%s", ref, e.Parent.Type)
			}
		}
		// TODO: resolve any remaining orphan references via mo.References()
	}

	return nil
}

// Load populates a Model from the given directory, as created by the 'govc object.save' command.
func (m *Model) Load(dir string) error {
	ctx := internalContext
	var s *ServiceInstance

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if filepath.Ext(path) != ".xml" {
			return nil
		}

		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer func() { _ = f.Close() }()

		dec := xml.NewDecoder(f)
		dec.TypeFunc = types.TypeFunc()
		var content types.ObjectContent
		err = dec.Decode(&content)
		if err != nil {
			return err
		}

		if content.Obj == vim25.ServiceInstance {
			s = new(ServiceInstance)
			s.Self = content.Obj
			Map = NewRegistry()
			Map.Put(s)
			return mo.LoadObjectContent([]types.ObjectContent{content}, &s.ServiceInstance)
		}

		if s == nil {
			s = NewServiceInstance(m.ServiceContent, m.RootFolder)
		}

		obj, err := loadObject(content)
		if err != nil {
			return err
		}

		Map.Put(obj)

		return nil
	})

	if err != nil {
		return err
	}

	m.Service = New(s)

	return m.resolveReferences(ctx)
}
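
// A minimal usage sketch (not part of the original source), assuming the
// directory "./testdata/dc0" holds 'govc object.save' output (the path is an
// example placeholder):
//
//	model := simulator.VPX()
//	defer model.Remove()
//
//	if err := model.Load("./testdata/dc0"); err != nil {
//		log.Fatal(err)
//	}
//
//	s := model.Service.NewServer()
//	defer s.Close()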

// Create populates the inventory with the entities specified by the Model
func (m *Model) Create() error {
	ctx := internalContext
	m.Service = New(NewServiceInstance(m.ServiceContent, m.RootFolder))

	client := m.Service.client
	root := object.NewRootFolder(client)

	// After all hosts are created, this var is used to mount the host datastores.
	var hosts []*object.HostSystem
	hostMap := make(map[string][]*object.HostSystem)

	// We need to defer VM creation until after the datastores are created.
	var vms []func() error
	// 1 DVS per DC, added to all hosts
	var dvs *object.DistributedVirtualSwitch
	// 1 NIC per VM, backed by a DVPG if Model.Portgroup > 0
	vmnet := esx.EthernetCard.Backing

	// addHost adds a cluster host or a standalone host.
	addHost := func(name string, f func(types.HostConnectSpec) (*object.Task, error)) (*object.HostSystem, error) {
		spec := types.HostConnectSpec{
			HostName: name,
		}

		task, err := f(spec)
		if err != nil {
			return nil, err
		}

		info, err := task.WaitForResult(context.Background(), nil)
		if err != nil {
			return nil, err
		}

		host := object.NewHostSystem(client, info.Result.(types.ManagedObjectReference))
		hosts = append(hosts, host)

		if dvs != nil {
			config := &types.DVSConfigSpec{
				Host: []types.DistributedVirtualSwitchHostMemberConfigSpec{{
					Operation: string(types.ConfigSpecOperationAdd),
					Host:      host.Reference(),
				}},
			}

			_, _ = dvs.Reconfigure(ctx, config)
		}

		return host, nil
	}

	// addMachine returns a func to create a VM.
	addMachine := func(prefix string, host *object.HostSystem, pool *object.ResourcePool, folders *object.DatacenterFolders) {
		nic := esx.EthernetCard
		nic.Backing = vmnet
		ds := types.ManagedObjectReference{}

		f := func() error {
			for i := 0; i < m.Machine; i++ {
				name := m.fmtName(prefix+"_VM", i)

				config := types.VirtualMachineConfigSpec{
					Name:    name,
					GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
					Files: &types.VirtualMachineFileInfo{
						VmPathName: "[LocalDS_0]",
					},
				}

				if pool == nil {
					pool, _ = host.ResourcePool(ctx)
				}

				var devices object.VirtualDeviceList

				scsi, _ := devices.CreateSCSIController("pvscsi")
				ide, _ := devices.CreateIDEController()
				cdrom, _ := devices.CreateCdrom(ide.(*types.VirtualIDEController))
				disk := devices.CreateDisk(scsi.(types.BaseVirtualController), ds,
					config.Files.VmPathName+" "+path.Join(name, "disk1.vmdk"))
				disk.CapacityInKB = 1024

				devices = append(devices, scsi, cdrom, disk, &nic)

				config.DeviceChange, _ = devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)

				task, err := folders.VmFolder.CreateVM(ctx, config, pool, host)
				if err != nil {
					return err
				}

				info, err := task.WaitForResult(ctx, nil)
				if err != nil {
					return err
				}

				vm := object.NewVirtualMachine(client, info.Result.(types.ManagedObjectReference))

				if m.Autostart {
					_, _ = vm.PowerOn(ctx)
				}
			}

			return nil
		}

		vms = append(vms, f)
	}

	nfolder := 0

	for ndc := 0; ndc < m.Datacenter; ndc++ {
		dcName := m.fmtName("DC", ndc)
		folder := root
		fName := m.fmtName("F", nfolder)

		// If Datacenter > Folder, don't create folders for the first N DCs.
		if nfolder < m.Folder && ndc >= (m.Datacenter-m.Folder) {
			f, err := folder.CreateFolder(ctx, fName)
			if err != nil {
				return err
			}
			folder = f
		}

		dc, err := folder.CreateDatacenter(ctx, dcName)
		if err != nil {
			return err
		}

		folders, err := dc.Folders(ctx)
		if err != nil {
			return err
		}

		if m.Pod > 0 {
			for pod := 0; pod < m.Pod; pod++ {
				_, _ = folders.DatastoreFolder.CreateStoragePod(ctx, m.fmtName(dcName+"_POD", pod))
			}
		}

		if folder != root {
			// Create sub-folders and use them to create any resources that follow
			subs := []**object.Folder{&folders.DatastoreFolder, &folders.HostFolder, &folders.NetworkFolder, &folders.VmFolder}

			for _, sub := range subs {
				f, err := (*sub).CreateFolder(ctx, fName)
				if err != nil {
					return err
				}

				*sub = f
			}

			nfolder++
		}

		if m.Portgroup > 0 || m.PortgroupNSX > 0 {
			var spec types.DVSCreateSpec
			spec.ConfigSpec = &types.VMwareDVSConfigSpec{}
			spec.ConfigSpec.GetDVSConfigSpec().Name = m.fmtName("DVS", 0)

			task, err := folders.NetworkFolder.CreateDVS(ctx, spec)
			if err != nil {
				return err
			}

			info, err := task.WaitForResult(ctx, nil)
			if err != nil {
				return err
			}

			dvs = object.NewDistributedVirtualSwitch(client, info.Result.(types.ManagedObjectReference))
		}

		for npg := 0; npg < m.Portgroup; npg++ {
			name := m.fmtName(dcName+"_DVPG", npg)

			task, err := dvs.AddPortgroup(ctx, []types.DVPortgroupConfigSpec{{Name: name}})
			if err != nil {
				return err
			}
			if err = task.Wait(ctx); err != nil {
				return err
			}

			// Use the 1st DVPG for the VMs' eth0 backing
			if npg == 0 {
				// AddPortgroup_Task does not return the moid, so we look it up by name
				net := Map.Get(folders.NetworkFolder.Reference()).(*Folder)
				pg := Map.FindByName(name, net.ChildEntity)

				vmnet, _ = object.NewDistributedVirtualPortgroup(client, pg.Reference()).EthernetCardBackingInfo(ctx)
			}
		}

		for npg := 0; npg < m.PortgroupNSX; npg++ {
			name := m.fmtName(dcName+"_NSXPG", npg)
			spec := types.DVPortgroupConfigSpec{
				Name:              name,
				LogicalSwitchUuid: uuid.New().String(),
			}

			task, err := dvs.AddPortgroup(ctx, []types.DVPortgroupConfigSpec{spec})
			if err != nil {
				return err
			}
			if err = task.Wait(ctx); err != nil {
				return err
			}
		}

		// Must use simulator methods directly for OpaqueNetwork
		networkFolder := Map.Get(folders.NetworkFolder.Reference()).(*Folder)

		for i := 0; i < m.OpaqueNetwork; i++ {
			var summary types.OpaqueNetworkSummary
			summary.Name = m.fmtName(dcName+"_NSX", i)
			err := networkFolder.AddOpaqueNetwork(summary)
			if err != nil {
				return err
			}
		}

		for nhost := 0; nhost < m.Host; nhost++ {
			name := m.fmtName(dcName+"_H", nhost)

			host, err := addHost(name, func(spec types.HostConnectSpec) (*object.Task, error) {
				return folders.HostFolder.AddStandaloneHost(ctx, spec, true, nil, nil)
			})
			if err != nil {
				return err
			}

			addMachine(name, host, nil, folders)
		}

		for ncluster := 0; ncluster < m.Cluster; ncluster++ {
			clusterName := m.fmtName(dcName+"_C", ncluster)

			cluster, err := folders.HostFolder.CreateCluster(ctx, clusterName, types.ClusterConfigSpecEx{})
			if err != nil {
				return err
			}

			for nhost := 0; nhost < m.ClusterHost; nhost++ {
				name := m.fmtName(clusterName+"_H", nhost)

				_, err = addHost(name, func(spec types.HostConnectSpec) (*object.Task, error) {
					return cluster.AddHost(ctx, spec, true, nil, nil)
				})
				if err != nil {
					return err
				}
			}

			pool, err := cluster.ResourcePool(ctx)
			if err != nil {
				return err
			}

			prefix := clusterName + "_RP"

			addMachine(prefix+"0", nil, pool, folders)

			for npool := 1; npool <= m.Pool; npool++ {
				spec := types.DefaultResourceConfigSpec()

				_, err = pool.Create(ctx, m.fmtName(prefix, npool), spec)
				if err != nil {
					return err
				}
			}

			prefix = clusterName + "_APP"

			for napp := 0; napp < m.App; napp++ {
				rspec := types.DefaultResourceConfigSpec()
				vspec := NewVAppConfigSpec()
				name := m.fmtName(prefix, napp)

				vapp, err := pool.CreateVApp(ctx, name, rspec, vspec, nil)
				if err != nil {
					return err
				}

				addMachine(name, nil, vapp.ResourcePool, folders)
			}
		}

		hostMap[dcName] = hosts
		hosts = nil
	}

	if m.ServiceContent.RootFolder == esx.RootFolder.Reference() {
		// ESX model
		host := object.NewHostSystem(client, esx.HostSystem.Reference())

		dc := object.NewDatacenter(client, esx.Datacenter.Reference())
		folders, err := dc.Folders(ctx)
		if err != nil {
			return err
		}

		hostMap[dc.Reference().Value] = append(hosts, host)

		addMachine(host.Reference().Value, host, nil, folders)
	}

	for dc, dchosts := range hostMap {
		for i := 0; i < m.Datastore; i++ {
			err := m.createLocalDatastore(dc, m.fmtName("LocalDS_", i), dchosts)
			if err != nil {
				return err
			}
		}
	}

	for _, createVM := range vms {
		err := createVM()
		if err != nil {
			return err
		}
	}

	// Turn on delay AFTER we're done building the service content
	m.Service.delay = &m.DelayConfig

	return nil
}

func (m *Model) createLocalDatastore(dc string, name string, hosts []*object.HostSystem) error {
	ctx := context.Background()
	dir, err := ioutil.TempDir("", fmt.Sprintf("govcsim-%s-%s-", dc, name))
	if err != nil {
		return err
	}

	m.dirs = append(m.dirs, dir)

	for _, host := range hosts {
		dss, err := host.ConfigManager().DatastoreSystem(ctx)
		if err != nil {
			return err
		}

		_, err = dss.CreateLocalDatastore(ctx, name, dir)
		if err != nil {
			return err
		}
	}

	return nil
}

// Remove cleans up items created by the Model, such as local datastore directories
func (m *Model) Remove() {
	// Remove associated vm containers, if any
	for _, obj := range Map.objects {
		if vm, ok := obj.(*VirtualMachine); ok {
			vm.run.remove(vm)
		}
	}

	for _, dir := range m.dirs {
		_ = os.RemoveAll(dir)
	}
}

// Run calls f with a Client connected to a simulator server instance, which is stopped after f returns.
func (m *Model) Run(f func(context.Context, *vim25.Client) error) error {
	ctx := context.Background()

	defer m.Remove()

	if m.Service == nil {
		err := m.Create()
		if err != nil {
			return err
		}
	}

	m.Service.TLS = new(tls.Config)
	m.Service.RegisterEndpoints = true

	s := m.Service.NewServer()
	defer s.Close()

	c, err := govmomi.NewClient(ctx, s.URL, true)
	if err != nil {
		return err
	}

	defer c.Logout(ctx)

	return f(ctx, c.Client)
}

// Run calls Model.Run for each model and will panic if f returns an error.
// If no model is specified, the VPX Model is used by default.
func Run(f func(context.Context, *vim25.Client) error, model ...*Model) {
	m := model
	if len(m) == 0 {
		m = []*Model{VPX()}
	}

	for i := range m {
		err := m[i].Run(f)
		if err != nil {
			panic(err)
		}
	}
}

// Test calls Run and expects the caller to propagate any errors, via testing.T for example.
func Test(f func(context.Context, *vim25.Client), model ...*Model) {
	Run(func(ctx context.Context, c *vim25.Client) error {
		f(ctx, c)
		return nil
	}, model...)
}
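
// A typical unit test using the helpers above might look like this (sketch,
// not part of the original source; the test name is illustrative):
//
//	func TestWithSimulator(t *testing.T) {
//		simulator.Test(func(ctx context.Context, c *vim25.Client) {
//			// c is logged in to the simulated vCenter built from the default
//			// VPX model; exercise any govmomi client code against it here.
//			if c.ServiceContent.About.ApiType != "VirtualCenter" {
//				t.Errorf("unexpected ApiType: %s", c.ServiceContent.About.ApiType)
//			}
//		})
//	}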