Skip to content

Commit f4e7850

Browse files
committed
Allow setting storage mappings for OVA disks
* Exposes OVA appliance disks as mappable storage in the OVA inventory collector.
* Permits mapping any or all OVA appliance disks to their own destination storage classes.
* Any disks that are not specifically mapped are assigned to a default storage class, which must itself be given a storage mapping.

Signed-off-by: Sam Lucidi <[email protected]>
1 parent b0b9ae7 commit f4e7850

File tree

6 files changed

+182
-46
lines changed

6 files changed

+182
-46
lines changed

pkg/controller/plan/adapter/ova/builder.go

Lines changed: 77 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ import (
1010
"strconv"
1111
"strings"
1212

13+
"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
1314
"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan"
1415
"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref"
1516
planbase "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base"
@@ -61,6 +62,11 @@ const (
6162
Unknown = "unknown"
6263
)
6364

65+
// Default Storage
66+
const (
67+
DefaultStorageID = "default"
68+
)
69+
6470
// Regex which matches the snapshot identifier suffix of a
6571
// OVA disk backing file.
6672
var backingFilePattern = regexp.MustCompile(`-\d\d\d\d\d\d.vmdk`)
@@ -154,48 +160,88 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
154160
return
155161
}
156162

157-
// For OVA provider we are assuming a single storage mapping.
158-
dsMapIn := r.Context.Map.Storage.Spec.Map
159-
for _, mapped := range dsMapIn {
163+
var defaultMapping *v1beta1.DestinationStorage
164+
mappedDiskIds := make(map[string]bool)
165+
storageMapIn := r.Context.Map.Storage.Spec.Map
166+
for i := range storageMapIn {
167+
mapped := &storageMapIn[i]
168+
ref := mapped.Source
169+
storage := &model.Storage{}
170+
fErr := r.Source.Inventory.Find(storage, ref)
171+
if fErr != nil {
172+
err = fErr
173+
return
174+
}
175+
if storage.ID == DefaultStorageID {
176+
defaultMapping = &mapped.Destination
177+
continue
178+
}
160179
for _, disk := range vm.Disks {
161-
diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits)
162-
if err != nil {
163-
return nil, err
164-
}
165-
storageClass := mapped.Destination.StorageClass
166-
dvSource := cdi.DataVolumeSource{
167-
Blank: &cdi.DataVolumeBlankImage{},
168-
}
169-
dvSpec := cdi.DataVolumeSpec{
170-
Source: &dvSource,
171-
Storage: &cdi.StorageSpec{
172-
Resources: core.ResourceRequirements{
173-
Requests: core.ResourceList{
174-
core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI),
175-
},
176-
},
177-
StorageClassName: &storageClass,
178-
},
180+
if disk.ID == storage.ID {
181+
var dv *cdi.DataVolume
182+
dv, err = r.mapDataVolume(disk, mapped.Destination, dvTemplate)
183+
if err != nil {
184+
return
185+
}
186+
dvs = append(dvs, *dv)
187+
mappedDiskIds[disk.ID] = true
179188
}
180-
// set the access mode and volume mode if they were specified in the storage map.
181-
// otherwise, let the storage profile decide the default values.
182-
if mapped.Destination.AccessMode != "" {
183-
dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{mapped.Destination.AccessMode}
189+
}
190+
}
191+
192+
for _, disk := range vm.Disks {
193+
if !mappedDiskIds[disk.ID] {
194+
if defaultMapping == nil {
195+
err = liberr.New("VM has unmapped disks and no default disk mapping is set.", "vm", vm.ID)
196+
return
184197
}
185-
if mapped.Destination.VolumeMode != "" {
186-
dvSpec.Storage.VolumeMode = &mapped.Destination.VolumeMode
198+
var dv *cdi.DataVolume
199+
dv, err = r.mapDataVolume(disk, *defaultMapping, dvTemplate)
200+
if err != nil {
201+
return
187202
}
188-
189-
dv := dvTemplate.DeepCopy()
190-
dv.Spec = dvSpec
191-
updateDataVolumeAnnotations(dv, &disk)
192203
dvs = append(dvs, *dv)
193204
}
194205
}
195206

196207
return
197208
}
198209

210+
func (r *Builder) mapDataVolume(disk ova.Disk, destination v1beta1.DestinationStorage, dvTemplate *cdi.DataVolume) (dv *cdi.DataVolume, err error) {
211+
diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits)
212+
if err != nil {
213+
return
214+
}
215+
storageClass := destination.StorageClass
216+
dvSource := cdi.DataVolumeSource{
217+
Blank: &cdi.DataVolumeBlankImage{},
218+
}
219+
dvSpec := cdi.DataVolumeSpec{
220+
Source: &dvSource,
221+
Storage: &cdi.StorageSpec{
222+
Resources: core.ResourceRequirements{
223+
Requests: core.ResourceList{
224+
core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI),
225+
},
226+
},
227+
StorageClassName: &storageClass,
228+
},
229+
}
230+
// set the access mode and volume mode if they were specified in the storage map.
231+
// otherwise, let the storage profile decide the default values.
232+
if destination.AccessMode != "" {
233+
dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{destination.AccessMode}
234+
}
235+
if destination.VolumeMode != "" {
236+
dvSpec.Storage.VolumeMode = &destination.VolumeMode
237+
}
238+
239+
dv = dvTemplate.DeepCopy()
240+
dv.Spec = dvSpec
241+
updateDataVolumeAnnotations(dv, &disk)
242+
return
243+
}
244+
199245
func updateDataVolumeAnnotations(dv *cdi.DataVolume, disk *ova.Disk) {
200246
if dv.ObjectMeta.Annotations == nil {
201247
dv.ObjectMeta.Annotations = make(map[string]string)

pkg/controller/plan/adapter/ova/validator.go

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,8 +84,27 @@ func (r *Validator) PodNetwork(vmRef ref.Ref) (ok bool, err error) {
8484

8585
// Validate that a VM's disk backing storage has been mapped.
8686
func (r *Validator) StorageMapped(vmRef ref.Ref) (ok bool, err error) {
87-
//For OVA providers, we don't have an actual storage connected,
88-
// since we use a dummy storage for mapping the function should always return true.
87+
if r.plan.Referenced.Map.Storage == nil {
88+
return
89+
}
90+
vm := &model.VM{}
91+
err = r.inventory.Find(vm, vmRef)
92+
if err != nil {
93+
err = liberr.Wrap(err, "vm", vmRef.String())
94+
return
95+
}
96+
97+
// If a default mapping is defined, that satisfies the requirement.
98+
if r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: DefaultStorageID}) {
99+
ok = true
100+
return
101+
}
102+
103+
for _, disk := range vm.Disks {
104+
if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: disk.ID}) {
105+
return
106+
}
107+
}
89108
ok = true
90109
return
91110
}

pkg/controller/provider/container/ova/model.go

Lines changed: 81 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package ova
33
import (
44
"context"
55
"errors"
6-
"fmt"
76

87
api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
98
model "github.com/konveyor/forklift-controller/pkg/controller/provider/model/ova"
@@ -12,6 +11,12 @@ import (
1211
"github.com/konveyor/forklift-controller/pkg/lib/logging"
1312
)
1413

14+
// Default Storage
15+
const (
16+
DefaultStorageID = "default"
17+
DefaultStorageName = "Default"
18+
)
19+
1520
// All adapters.
1621
var adapterList []Adapter
1722

@@ -356,33 +361,99 @@ type StorageAdapter struct {
356361
}
357362

358363
func (r *StorageAdapter) GetUpdates(ctx *Context) (updates []Updater, err error) {
364+
disks := []Disk{}
365+
err = ctx.client.list("disks", &disks)
366+
if err != nil {
367+
return
368+
}
369+
for i := range disks {
370+
disk := &disks[i]
371+
updater := func(tx *libmodel.Tx) (err error) {
372+
m := &model.Storage{
373+
Base: model.Base{
374+
ID: disk.ID,
375+
},
376+
}
377+
err = tx.Get(m)
378+
if err != nil {
379+
if errors.Is(err, libmodel.NotFound) {
380+
m.Name = disk.Name
381+
err = tx.Insert(m)
382+
}
383+
return
384+
}
385+
m.Name = disk.Name
386+
err = tx.Update(m)
387+
return
388+
}
389+
updates = append(updates, updater)
390+
}
359391
return
360392
}
361393

362394
// List the collection.
363395
func (r *StorageAdapter) List(ctx *Context, provider *api.Provider) (itr fb.Iterator, err error) {
364-
storageName := fmt.Sprintf("Dummy storage for source provider %s", provider.Name)
365-
dummyStorge := Storage{
366-
Name: storageName,
367-
ID: string(provider.UID),
396+
diskList := []Disk{}
397+
err = ctx.client.list("disks", &diskList)
398+
if err != nil {
399+
return
368400
}
369401
list := fb.NewList()
370402
m := &model.Storage{
371403
Base: model.Base{
372-
ID: dummyStorge.ID,
373-
Name: dummyStorge.Name,
404+
ID: DefaultStorageID,
405+
Name: DefaultStorageName,
374406
},
375407
}
376-
dummyStorge.ApplyTo(m)
377408
list.Append(m)
378409

410+
for _, object := range diskList {
411+
m := &model.Storage{
412+
Base: model.Base{
413+
ID: object.ID,
414+
Name: object.Name,
415+
},
416+
}
417+
list.Append(m)
418+
}
419+
379420
itr = list.Iter()
380421

381422
return
382423
}
383424

384425
func (r *StorageAdapter) DeleteUnexisting(ctx *Context) (deletions []Updater, err error) {
385-
// Each provider have only one storage hence it can't be changed,
386-
// Will be removed only if the provider deleted.
426+
storageList := []model.Storage{}
427+
err = ctx.db.List(&storageList, libmodel.FilterOptions{})
428+
if err != nil {
429+
if errors.Is(err, libmodel.NotFound) {
430+
err = nil
431+
}
432+
return
433+
}
434+
inventory := make(map[string]bool)
435+
for _, storage := range storageList {
436+
inventory[storage.ID] = true
437+
}
438+
disks := []Disk{}
439+
err = ctx.client.list("disks", &disks)
440+
if err != nil {
441+
return
442+
}
443+
gone := []string{}
444+
for _, disk := range disks {
445+
if _, found := inventory[disk.ID]; !found {
446+
gone = append(gone, disk.ID)
447+
}
448+
}
449+
for _, id := range gone {
450+
updater := func(tx *libmodel.Tx) (err error) {
451+
m := &model.Storage{
452+
Base: model.Base{ID: id},
453+
}
454+
return tx.Delete(m)
455+
}
456+
deletions = append(deletions, updater)
457+
}
387458
return
388459
}

pkg/controller/provider/web/ova/client.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -298,6 +298,7 @@ func (r *Finder) Storage(ref *base.Ref) (object interface{}, err error) {
298298
storage := &Storage{}
299299
err = r.ByRef(storage, *ref)
300300
if err == nil {
301+
ref.ID = storage.ID
301302
ref.Name = storage.Name
302303
object = storage
303304
}

tests/suit/framework/ova.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ func (r *OvaClient) LoadSourceDetails() (vm *OvaVM, err error) {
1919

2020
r.vmData.testVMId = "c5686650854d1e69b4123f4bf2e70fe1ed2a"
2121
r.vmData.testNetworkID = "ae1badc8c693926f492a01e2f357d6af321b"
22-
r.vmData.testStorageName = "Dummy storage for source provider ova-provider"
22+
r.vmData.testStorageName = "centos44_new-disk1.vmdk"
2323
return &r.vmData, nil
2424
}
2525

@@ -39,7 +39,7 @@ func (r *OvaClient) GetNfsServerForOva(k8sClient *kubernetes.Clientset) (string,
3939
}
4040
nfsShare := server + ":" + share
4141
if nfsShare == "" {
42-
return "", errors.New("failed to fatch NFS settings")
42+
return "", errors.New("failed to fetch NFS settings")
4343
}
4444

4545
r.nfsPath = nfsShare

tests/suit/ova_test.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,5 @@ var _ = Describe("[level:component]Migration tests for OVA provider", func() {
8080
Expect(err).ToNot(HaveOccurred())
8181
err = utils.WaitForMigrationSucceededWithTimeout(f.CrClient, provider.Namespace, test_migration_name, 900*time.Second)
8282
Expect(err).ToNot(HaveOccurred())
83-
8483
})
8584
})

0 commit comments

Comments
 (0)