Skip to content

Commit 985eec8

Browse files
committed
Remove openshift and k8s pins
1 parent 796738b commit 985eec8

File tree

2,763 files changed

+293889
-201116
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

2,763 files changed

+293889
-201116
lines changed

cmd/aro/operator.go

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,6 @@ func operator(ctx context.Context, log *logrus.Entry) error {
7575

7676
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
7777
HealthProbeBindAddress: ":8080",
78-
MetricsBindAddress: "0", // disabled
79-
Port: 8443,
8078
})
8179
if err != nil {
8280
return err

go.mod

Lines changed: 90 additions & 617 deletions
Large diffs are not rendered by default.

go.sum

Lines changed: 212 additions & 121 deletions
Large diffs are not rendered by default.

pkg/cluster/delete.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -330,13 +330,13 @@ func (m *manager) deleteGatewayAndWait(ctx context.Context) error {
330330
}
331331

332332
m.log.Info("waiting for gateway record deletion")
333-
return wait.PollImmediateUntil(15*time.Second, func() (bool, error) {
333+
return wait.PollUntilContextCancel(timeoutCtx, 15*time.Second, true, func(ctx context.Context) (bool, error) {
334334
_, err := m.dbGateway.Get(ctx, m.doc.OpenShiftCluster.Properties.NetworkProfile.GatewayPrivateLinkID)
335-
if err != nil && cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) /* already gone */ {
336-
return true, nil
335+
if err != nil && cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) /* already gone */ {
336+
return true, nil
337337
}
338338
return false, nil
339-
}, timeoutCtx.Done())
339+
})
340340
}
341341

342342
func (m *manager) deleteGateway(ctx context.Context) error {

pkg/cluster/deploybaseresources.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -357,7 +357,7 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn
357357
// NSG since the inner loop is tolerant of that, and since we are attaching
358358
// the same NSG the only allowed failure case is when the NSG cannot be
359359
// attached to begin with, so it shouldn't happen in practice.
360-
_ = wait.PollImmediateUntil(pollInterval, func() (bool, error) {
360+
_ = wait.PollUntilContextCancel(timeoutCtx, pollInterval, true, func(ctx context.Context) (bool, error) {
361361
var c bool
362362
c, innerErr = func() (bool, error) {
363363
for _, subnetID := range []string{
@@ -414,9 +414,8 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn
414414
}
415415
return true, nil
416416
}()
417-
418417
return c, innerErr
419-
}, timeoutCtx.Done())
418+
})
420419

421420
return innerErr
422421
}

pkg/cluster/install.go

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import (
1515
kruntime "k8s.io/apimachinery/pkg/runtime"
1616
"k8s.io/client-go/dynamic"
1717
"k8s.io/client-go/kubernetes"
18+
"k8s.io/client-go/rest"
1819
"sigs.k8s.io/controller-runtime/pkg/client"
1920
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
2021

@@ -632,7 +633,12 @@ func (m *manager) initializeKubernetesClients(ctx context.Context) error {
632633
return err
633634
}
634635

635-
mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
636+
httpClient, err := rest.HTTPClientFor(restConfig)
637+
if err != nil {
638+
return err
639+
}
640+
641+
mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient)
636642
if err != nil {
637643
return err
638644
}

pkg/deploy/predeploy.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -553,9 +553,9 @@ func (d *deployer) restartOldScaleset(ctx context.Context, vmssName string, lbHe
553553
}
554554

555555
func (d *deployer) waitForReadiness(ctx context.Context, vmssName string, vmInstanceID string) error {
556-
return wait.PollImmediateUntil(10*time.Second, func() (bool, error) {
556+
return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
557557
return d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, vmInstanceID), nil
558-
}, ctx.Done())
558+
})
559559
}
560560

561561
func (d *deployer) isVMInstanceHealthy(ctx context.Context, resourceGroupName string, vmssName string, vmInstanceID string) bool {

pkg/deploy/predeploy_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1647,7 +1647,7 @@ func TestWaitForReadiness(t *testing.T) {
16471647
cancel: cancelFastTimeout,
16481648
},
16491649
mocks: []mock{getInstanceViewMock(unhealthyVMSS)},
1650-
wantErr: "timed out waiting for the condition",
1650+
wantErr: "context deadline exceeded",
16511651
},
16521652
{
16531653
name: "run successfully after confirming healthy status",

pkg/deploy/upgrade_gateway.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,15 +39,15 @@ func (d *deployer) gatewayWaitForReadiness(ctx context.Context, vmssName string)
3939
}
4040

4141
d.log.Printf("waiting for %s instances to be healthy", vmssName)
42-
return wait.PollImmediateUntil(10*time.Second, func() (bool, error) {
42+
return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
4343
for _, vm := range scalesetVMs {
4444
if !d.isVMInstanceHealthy(ctx, d.config.GatewayResourceGroupName, vmssName, *vm.InstanceID) {
4545
return false, nil
4646
}
4747
}
4848

4949
return true, nil
50-
}, ctx.Done())
50+
})
5151
}
5252

5353
func (d *deployer) gatewayRemoveOldScalesets(ctx context.Context) error {

pkg/deploy/upgrade_rp.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,15 +39,15 @@ func (d *deployer) rpWaitForReadiness(ctx context.Context, vmssName string) erro
3939
}
4040

4141
d.log.Printf("waiting for %s instances to be healthy", vmssName)
42-
return wait.PollImmediateUntil(10*time.Second, func() (bool, error) {
42+
return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
4343
for _, vm := range scalesetVMs {
4444
if !d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, *vm.InstanceID) {
4545
return false, nil
4646
}
4747
}
4848

4949
return true, nil
50-
}, ctx.Done())
50+
})
5151
}
5252

5353
func (d *deployer) rpRemoveOldScalesets(ctx context.Context) error {

0 commit comments

Comments (0)