Skip to content

Commit

Permalink
Re-enable unit tests and linting in gha
Browse files Browse the repository at this point in the history
  • Loading branch information
dee-kryvenko committed Feb 10, 2025
1 parent 7060490 commit 9ae92b4
Show file tree
Hide file tree
Showing 13 changed files with 51 additions and 42 deletions.
5 changes: 2 additions & 3 deletions .github/workflows/lint.yml
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
name: Lint

on:
# Disable for now, until we have something to test
# push:
# pull_request:
push:
pull_request:
workflow_dispatch:

jobs:
Expand Down
5 changes: 2 additions & 3 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
name: Tests

on:
# Disable for now, until we have something to test
# push:
# pull_request:
push:
pull_request:
workflow_dispatch:

jobs:
Expand Down
2 changes: 1 addition & 1 deletion api/autoscaler/common/replica.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ func (list ReplicaList) SerializeToString() string {
}
sort.Strings(keys)

var serializedItems []string
serializedItems := make([]string, 0, len(keys))
for _, k := range keys {
serializedItems = append(serializedItems, fmt.Sprintf("%s=%v", k, m[k]))
}
Expand Down
4 changes: 2 additions & 2 deletions api/autoscaler/v1alpha1/weightedpnormloadindex_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ type WeightedPNormLoadIndexWeight struct {
// meaning that if there are negative input values (based on previous normalization),
// they will be reducing the load index accordingly to their weight.
// For example - Robust Scaling normalization results in 0 representing a median.
// Depending on the original source of normalized values, this may or may not be desireable.
// Depending on the original source of normalized values, this may or may not be desirable.
// Set to false to replace all negative values with 0.
// That will prevent load index reductions and it will only go up from positive values.
// For Robust Scaling normalization, for example,
Expand All @@ -55,7 +55,7 @@ type WeightedPNormLoadIndexSpec struct {
// +kubebuilder:validation:Required
P int32 `json:"p,omitempty"`

// Weigths is the list of metrics and their weights to use in this load index.
// Weights is the list of metrics and their weights to use in this load index.
// +kubebuilder:validation:Required
Weights []WeightedPNormLoadIndexWeight `json:"weights,omitempty"`
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ import (
autoscalerv1alpha1 "github.com/plumber-cd/argocd-autoscaler/api/autoscaler/v1alpha1"
autoscalercontroller "github.com/plumber-cd/argocd-autoscaler/internal/controller/autoscaler"
"github.com/plumber-cd/argocd-autoscaler/loadindexers/weightedpnorm"
robustscaling "github.com/plumber-cd/argocd-autoscaler/normalizers/robustscaling"
"github.com/plumber-cd/argocd-autoscaler/normalizers/robustscaling"
"github.com/plumber-cd/argocd-autoscaler/partitioners/longestprocessingtime"
"github.com/plumber-cd/argocd-autoscaler/pollers/prometheus"
// +kubebuilder:scaffold:imports
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ spec:
format: int32
type: integer
weights:
description: Weigths is the list of metrics and their weights to use
description: Weights is the list of metrics and their weights to use
in this load index.
items:
description: WeightedPNormLoadIndexWeight is the weight of the metric.
Expand All @@ -91,7 +91,7 @@ spec:
meaning that if there are negative input values (based on previous normalization),
they will be reducing the load index accordingly to their weight.
For example - Robust Scaling normalization results in 0 representing a median.
Depending on the original source of normalized values, this may or may not be desireable.
Depending on the original source of normalized values, this may or may not be desirable.
Set to false to replace all negative values with 0.
That will prevent load index reductions and it will only go up from positive values.
For Robust Scaling normalization, for example,
Expand Down
14 changes: 9 additions & 5 deletions internal/controller/autoscaler/replicasetscaler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ const (
ReplicaSetReconcilerModeDefault = ReplicaSetReconcilerMode("default")
ReplicaSetReconcilerModeRolloutRestart = ReplicaSetReconcilerMode("rollout-restart")
ReplicaSetReconcilerModeX0Y = ReplicaSetReconcilerMode("x0y")

ReplicaSetControllerKindStatefulSet = "StatefulSet"
)

type ReplicaSetController struct {
Expand All @@ -69,6 +71,8 @@ type ReplicaSetScalerReconciler struct {

// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
//
//nolint:gocyclo
func (r *ReplicaSetScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
log.V(1).Info("Received reconcile request")
Expand Down Expand Up @@ -203,7 +207,7 @@ func (r *ReplicaSetScalerReconciler) Reconcile(ctx context.Context, req ctrl.Req

replicaSetController := ReplicaSetController{}
switch scaler.Spec.ReplicaSetControllerRef.Kind {
case "StatefulSet":
case ReplicaSetControllerKindStatefulSet:
replicaSetController.Kind = scaler.Spec.ReplicaSetControllerRef.Kind
default:
err := fmt.Errorf("Unsupported ReplicaSetControllerRef.Kind")
Expand All @@ -227,7 +231,7 @@ func (r *ReplicaSetScalerReconciler) Reconcile(ctx context.Context, req ctrl.Req
}

switch replicaSetController.Kind {
case "StatefulSet":
case ReplicaSetControllerKindStatefulSet:
statefulSetController, err := findByRef[*appsv1.StatefulSet](
ctx,
r.Scheme,
Expand Down Expand Up @@ -407,7 +411,7 @@ func (r *ReplicaSetScalerReconciler) Reconcile(ctx context.Context, req ctrl.Req

func (r *ReplicaSetScalerReconciler) GetRSControllerDesiredReplicas(replicaSetController ReplicaSetController) int32 {
switch replicaSetController.Kind {
case "StatefulSet":
case ReplicaSetControllerKindStatefulSet:
return *replicaSetController.StatefulSet.Spec.Replicas
default:
panic("unreachable")
Expand All @@ -416,7 +420,7 @@ func (r *ReplicaSetScalerReconciler) GetRSControllerDesiredReplicas(replicaSetCo

func (r *ReplicaSetScalerReconciler) GetRSControllerActualReplicas(replicaSetController ReplicaSetController) int32 {
switch replicaSetController.Kind {
case "StatefulSet":
case ReplicaSetControllerKindStatefulSet:
return replicaSetController.StatefulSet.Status.Replicas
default:
panic("unreachable")
Expand All @@ -426,7 +430,7 @@ func (r *ReplicaSetScalerReconciler) GetRSControllerActualReplicas(replicaSetCon
func (r *ReplicaSetScalerReconciler) ScaleTo(ctx context.Context, replicaSetController ReplicaSetController, replicas int32, restart bool) error {
var obj client.Object
switch replicaSetController.Kind {
case "StatefulSet":
case ReplicaSetControllerKindStatefulSet:
replicaSetController.StatefulSet.Spec.Replicas = ptr.To(replicas)
containerFound := false
for containerIndex, container := range replicaSetController.StatefulSet.Spec.Template.Spec.Containers {
Expand Down
26 changes: 13 additions & 13 deletions internal/controller/autoscaler/replicasetscaler_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -493,7 +493,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(ContainSubstring("fake failure to update shard manager"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand All @@ -513,7 +513,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"update shard manager with the expected replicas",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).NotTo(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -557,7 +557,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"begin RS controller scaling",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).NotTo(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -629,7 +629,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(ContainSubstring("fake failure to update sts"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand All @@ -649,7 +649,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"apply STS scaling",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).ToNot(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -707,7 +707,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"apply STS scaling and restart",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).ToNot(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -847,7 +847,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(ContainSubstring("fake failure to update sts"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand All @@ -867,7 +867,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"apply STS scaling to 0",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).ToNot(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -935,7 +935,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(ContainSubstring("fake failure to update shard manager"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand All @@ -955,7 +955,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"update shard manager with the expected replicas",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).NotTo(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -999,7 +999,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"begin RS controller scaling",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).NotTo(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down Expand Up @@ -1071,7 +1071,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(ContainSubstring("fake failure to update sts"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand All @@ -1091,7 +1091,7 @@ var _ = Describe("ReplicaSetScaler Controller", func() {
"apply STS scaling",
func(run *ScenarioRun[*autoscalerv1alpha1.ReplicaSetScaler]) {
Expect(run.ReconcileError()).ToNot(HaveOccurred())
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking conditions")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ var _ = Describe("SecretTypeClusterShardManager Controller", func() {
func(run *ScenarioRun[*autoscalerv1alpha1.SecretTypeClusterShardManager]) {
Expect(run.ReconcileError()).To(HaveOccurred())
Expect(run.ReconcileError().Error()).To(Equal("fake error updating secret"))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Duration(time.Second)))
Expect(run.ReconcileResult().RequeueAfter).To(Equal(time.Second))
Expect(run.ReconcileResult().Requeue).To(BeFalse())

By("Checking ready condition")
Expand Down
11 changes: 5 additions & 6 deletions loadindexers/weightedpnorm/weightedpnorm.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,16 +66,16 @@ func (li *LoadIndexerImpl) Calculate(

loadIndexes := []common.LoadIndex{}
for shardUID, values := range metricsByShard {
value, err := li.loadIndex(
value := li.loadIndex(
p,
weights,
values,
)
valueAsString := strconv.FormatFloat(value, 'f', -1, 64)
valueAsResource, err := resource.ParseQuantity(valueAsString)
if err != nil {
return nil, err
}
valueAsString := strconv.FormatFloat(value, 'f', -1, 64)
valueAsResource, err := resource.ParseQuantity(valueAsString)
loadIndexes = append(loadIndexes, common.LoadIndex{
Shard: shardsByUID[shardUID],
Value: valueAsResource,
Expand All @@ -90,8 +90,7 @@ func (_ *LoadIndexerImpl) loadIndex(
p int32,
weights map[string]autoscaler.WeightedPNormLoadIndexWeight,
values []common.MetricValue,
) (float64, error) {

) float64 {
sum := 0.0
for _, m := range values {
weight, ok := weights[m.ID]
Expand All @@ -106,5 +105,5 @@ func (_ *LoadIndexerImpl) loadIndex(
sum += weightValue * math.Pow(math.Abs(value), float64(p))
}

return math.Pow(sum, float64(1)/float64(p)), nil
return math.Pow(sum, float64(1)/float64(p))
}
6 changes: 5 additions & 1 deletion test/harness/fake_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,11 @@ func (f *FakeClient) WithStatusUpdateFunction(container GenericObjectContainer,

// Update calls the custom Update function if it was registered for the given object status.
// Otherwise, it calls the original client.Client.Status().Update function.
func (s *FakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error {
func (s *FakeStatusWriter) Update(
ctx context.Context,
obj client.Object,
opts ...client.SubResourceUpdateOption,
) error {
if s.parent.statusUpdateFunctions != nil {
if functions, ok := s.parent.statusUpdateFunctions[reflect.TypeOf(obj)]; ok {
if fn, ok := functions[client.ObjectKeyFromObject(obj).String()]; ok {
Expand Down
3 changes: 2 additions & 1 deletion test/harness/object_container.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@ type ObjectContainer[K client.Object] struct {
}

// NewObjectContainer creates a new ObjectContainer with the provided object.
// The object passed in must be initialized with metav1.ObjectMeta, where the NamespacedName and ObjectKey would inferred from.
// The object passed in must be initialized with metav1.ObjectMeta,
// where the NamespacedName and ObjectKey would be inferred from.
// Optionally, the set of prep functions is taken in to prepare an object beyond the metadata (i.e. spec).
// This can use unprepared client, including client.Client. It does not have to be the one used in a ScenarioRun.
// NOTE that this function does NOT create a resource in the cluster. The client is only used for schema and RESTMapper.
Expand Down
9 changes: 6 additions & 3 deletions test/harness/scenario.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,8 @@ func (s *Scenario[K]) Branch(
return s
}

// BranchResourceNotFoundCheck is a shortcut function that uses scenario client and a set of checks that are expecting clean exit with no re-queue.
// BranchResourceNotFoundCheck is a shortcut function that uses a fake client
// and a set of checks that are expecting clean exit with no re-queue.
// Acts as a separate branch, does not modify the state, and allows to continue the scenario without impacting it.
func (s *Scenario[K]) BranchResourceNotFoundCheck(callback ScenarioRegisterCallback[K]) *Scenario[K] {
s.Branch("resource not found", func(branch *Scenario[K]) {
Expand All @@ -162,7 +163,8 @@ func (s *Scenario[K]) BranchResourceNotFoundCheck(callback ScenarioRegisterCallb
return s
}

// BranchFailureToGetResourceCheck is a shortcut function that uses a WithClientFromParentThatFailsToGetResource() and a set of checks that are expecting a corresponding error from it.
// BranchFailureToGetResourceCheck is a shortcut function that uses a WithClientFromParentThatFailsToGetResource()
// and a set of checks that are expecting a corresponding error from it.
// Acts as a separate branch, does not modify the state, and allows to continue the scenario without impacting it.
func (s *Scenario[K]) BranchFailureToGetResourceCheck(callback ScenarioRegisterCallback[K]) *Scenario[K] {
s.Branch("failure to get resource", func(branch *Scenario[K]) {
Expand All @@ -179,7 +181,8 @@ func (s *Scenario[K]) BranchFailureToGetResourceCheck(callback ScenarioRegisterC
return s
}

// BranchFailureToUpdateStatusCheck is a shortcut function that uses a WithClientFromParentThatFailsToUpdateStatus() and a set of check that are expecting a corresponding error from it.
// BranchFailureToUpdateStatusCheck is a shortcut function that uses a WithClientFromParentThatFailsToUpdateStatus()
// and a set of checks that are expecting a corresponding error from it.
// Acts as a separate branch, does not modify the state, and allows to continue the scenario without impacting it.
func (s *Scenario[K]) BranchFailureToUpdateStatusCheck(callback ScenarioRegisterCallback[K]) *Scenario[K] {
s.Branch("failure to update status", func(branch *Scenario[K]) {
Expand Down

0 comments on commit 9ae92b4

Please sign in to comment.