diff --git a/Makefile b/Makefile
index 7edf15aee..b797f1b1f 100644
--- a/Makefile
+++ b/Makefile
@@ -399,9 +399,7 @@ e2e-provider-deploy:
 e2e-deploy-manifest:
 	kubectl apply -f manifest_staging/deploy/csidriver.yaml
 	kubectl apply -f manifest_staging/deploy/rbac-secretproviderclass.yaml
-	kubectl apply -f manifest_staging/deploy/rbac-secretproviderrotation.yaml
 	kubectl apply -f manifest_staging/deploy/rbac-secretprovidersyncing.yaml
-	kubectl apply -f manifest_staging/deploy/rbac-secretprovidertokenrequest.yaml
 	kubectl apply -f manifest_staging/deploy/secrets-store.csi.x-k8s.io_secretproviderclasses.yaml
 	kubectl apply -f manifest_staging/deploy/secrets-store.csi.x-k8s.io_secretproviderclasspodstatuses.yaml
 	kubectl apply -f manifest_staging/deploy/role-secretproviderclasses-admin.yaml
diff --git a/cmd/secrets-store-csi-driver/main.go b/cmd/secrets-store-csi-driver/main.go
index ef3dcc5e1..0c649270e 100644
--- a/cmd/secrets-store-csi-driver/main.go
+++ b/cmd/secrets-store-csi-driver/main.go
@@ -27,9 +27,7 @@ import (
 
 	secretsstorev1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 	"sigs.k8s.io/secrets-store-csi-driver/controllers"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
 	"sigs.k8s.io/secrets-store-csi-driver/pkg/metrics"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/rotation"
 	secretsstore "sigs.k8s.io/secrets-store-csi-driver/pkg/secrets-store"
 	"sigs.k8s.io/secrets-store-csi-driver/pkg/version"
 
@@ -39,7 +37,6 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/kubernetes"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
@@ -203,26 +200,7 @@ func mainErr() error {
 		reconciler.RunPatcher(ctx)
 	}()
 
-	// token request client
-	kubeClient := kubernetes.NewForConfigOrDie(cfg)
-	tokenClient := k8s.NewTokenClient(kubeClient, *driverName, 10*time.Minute)
-
-	if err = tokenClient.Run(ctx.Done()); err != nil {
-		klog.ErrorS(err, "failed to run token client")
-		return err
-	}
-
-	// Secret rotation
-	if *enableSecretRotation {
-		rec, err := rotation.NewReconciler(*driverName, mgr.GetCache(), scheme, *rotationPollInterval, providerClients, tokenClient)
-		if err != nil {
-			klog.ErrorS(err, "failed to initialize rotation reconciler")
-			return err
-		}
-		go rec.Run(ctx.Done())
-	}
-
-	driver := secretsstore.NewSecretsStoreDriver(*driverName, *nodeID, *endpoint, providerClients, mgr.GetClient(), mgr.GetAPIReader(), tokenClient)
+	driver := secretsstore.NewSecretsStoreDriver(*driverName, *nodeID, *endpoint, providerClients, mgr.GetClient(), mgr.GetAPIReader(), *enableSecretRotation, *rotationPollInterval)
 	driver.Run(ctx)
 
 	return nil
diff --git a/controllers/secretproviderclasspodstatus_controller.go b/controllers/secretproviderclasspodstatus_controller.go
index 8fbee8c14..d34be4895 100644
--- a/controllers/secretproviderclasspodstatus_controller.go
+++ b/controllers/secretproviderclasspodstatus_controller.go
@@ -295,58 +295,44 @@ func (r *SecretProviderClassPodStatusReconciler) Reconcile(ctx context.Context,
 			errs = append(errs, fmt.Errorf("failed to validate secret object in spc %s/%s, err: %w", spc.Namespace, spc.Name, err))
 			continue
 		}
-		exists, err := r.secretExists(ctx, secretName, req.Namespace)
-		if err != nil {
-			klog.ErrorS(err, "failed to check if secret exists", "secret", klog.ObjectRef{Namespace: req.Namespace, Name: secretName}, "spc", klog.KObj(spc), "pod", klog.KObj(pod), "spcps", klog.KObj(spcPodStatus))
-			// syncSecret.enabled is set to false by default in the helm chart for installing the driver in v0.0.23+
-			// that would result in a forbidden error, so generate a warning that can be helpful for debugging
-			if apierrors.IsForbidden(err) {
-				klog.Warning(SyncSecretForbiddenWarning)
-			}
-			errs = append(errs, fmt.Errorf("failed to check if secret %s exists, err: %w", secretName, err))
-			continue
-		}
 
 		var funcs []func() (bool, error)
+		secretType := secretutil.GetSecretType(strings.TrimSpace(secretObj.Type))
 
-		if !exists {
-			secretType := secretutil.GetSecretType(strings.TrimSpace(secretObj.Type))
-
-			var datamap map[string][]byte
-			if datamap, err = secretutil.GetSecretData(secretObj.Data, secretType, files); err != nil {
-				r.generateEvent(pod, corev1.EventTypeWarning, secretCreationFailedReason, fmt.Sprintf("failed to get data in spc %s/%s for secret %s, err: %+v", req.Namespace, spcName, secretName, err))
-				klog.ErrorS(err, "failed to get data in spc for secret", "spc", klog.KObj(spc), "pod", klog.KObj(pod), "secret", klog.ObjectRef{Namespace: req.Namespace, Name: secretName}, "spcps", klog.KObj(spcPodStatus))
-				errs = append(errs, fmt.Errorf("failed to get data in spc %s/%s for secret %s, err: %w", req.Namespace, spcName, secretName, err))
-				continue
-			}
+		var datamap map[string][]byte
+		if datamap, err = secretutil.GetSecretData(secretObj.Data, secretType, files); err != nil {
+			r.generateEvent(pod, corev1.EventTypeWarning, secretCreationFailedReason, fmt.Sprintf("failed to get data in spc %s/%s for secret %s, err: %+v", req.Namespace, spcName, secretName, err))
+			klog.ErrorS(err, "failed to get data in spc for secret", "spc", klog.KObj(spc), "pod", klog.KObj(pod), "secret", klog.ObjectRef{Namespace: req.Namespace, Name: secretName}, "spcps", klog.KObj(spcPodStatus))
+			errs = append(errs, fmt.Errorf("failed to get data in spc %s/%s for secret %s, err: %w", req.Namespace, spcName, secretName, err))
+			continue
+		}
 
-			labelsMap := make(map[string]string)
-			if secretObj.Labels != nil {
-				labelsMap = secretObj.Labels
-			}
-			annotationsMap := make(map[string]string)
-			if secretObj.Annotations != nil {
-				annotationsMap = secretObj.Annotations
-			}
-			// Set secrets-store.csi.k8s.io/managed=true label on the secret that's created and managed
-			// by the secrets-store-csi-driver. This label will be used to perform a filtered list watch
-			// only on secrets created and managed by the driver
-			labelsMap[SecretManagedLabel] = "true"
-
-			createFn := func() (bool, error) {
-				if err := r.createK8sSecret(ctx, secretName, req.Namespace, datamap, labelsMap, annotationsMap, secretType); err != nil {
-					klog.ErrorS(err, "failed to create Kubernetes secret", "spc", klog.KObj(spc), "pod", klog.KObj(pod), "secret", klog.ObjectRef{Namespace: req.Namespace, Name: secretName}, "spcps", klog.KObj(spcPodStatus))
-					// syncSecret.enabled is set to false by default in the helm chart for installing the driver in v0.0.23+
-					// that would result in a forbidden error, so generate a warning that can be helpful for debugging
-					if apierrors.IsForbidden(err) {
-						klog.Warning(SyncSecretForbiddenWarning)
-					}
-					return false, nil
+		labelsMap := make(map[string]string)
+		if secretObj.Labels != nil {
+			labelsMap = secretObj.Labels
+		}
+		annotationsMap := make(map[string]string)
+		if secretObj.Annotations != nil {
+			annotationsMap = secretObj.Annotations
+		}
+		// Set secrets-store.csi.k8s.io/managed=true label on the secret that's created and managed
+		// by the secrets-store-csi-driver. This label will be used to perform a filtered list watch
+		// only on secrets created and managed by the driver
+		labelsMap[SecretManagedLabel] = "true"
+
+		createFn := func() (bool, error) {
+			if err := r.createOrUpdateK8sSecret(ctx, secretName, req.Namespace, datamap, labelsMap, annotationsMap, secretType); err != nil {
+				klog.ErrorS(err, "failed to create Kubernetes secret", "spc", klog.KObj(spc), "pod", klog.KObj(pod), "secret", klog.ObjectRef{Namespace: req.Namespace, Name: secretName}, "spcps", klog.KObj(spcPodStatus))
+				// syncSecret.enabled is set to false by default in the helm chart for installing the driver in v0.0.23+
+				// that would result in a forbidden error, so generate a warning that can be helpful for debugging
+				if apierrors.IsForbidden(err) {
+					klog.Warning(SyncSecretForbiddenWarning)
 				}
-				return true, nil
+				return false, nil
 			}
-			funcs = append(funcs, createFn)
+			return true, nil
 		}
+		funcs = append(funcs, createFn)
 
 		for _, f := range funcs {
 			if err := wait.ExponentialBackoff(wait.Backoff{
@@ -410,9 +396,9 @@ func (r *SecretProviderClassPodStatusReconciler) processIfBelongsToNode(objMeta
 	return true
 }
 
-// createK8sSecret creates K8s secret with data from mounted files
+// createOrUpdateK8sSecret creates the K8s secret with data from mounted files, or updates it in place if it already exists
 // If a secret with the same name already exists in the namespace of the pod, the error is nil.
-func (r *SecretProviderClassPodStatusReconciler) createK8sSecret(ctx context.Context, name, namespace string, datamap map[string][]byte, labelsmap map[string]string, annotationsmap map[string]string, secretType corev1.SecretType) error {
+func (r *SecretProviderClassPodStatusReconciler) createOrUpdateK8sSecret(ctx context.Context, name, namespace string, datamap map[string][]byte, labelsmap map[string]string, annotationsmap map[string]string, secretType corev1.SecretType) error {
 	secret := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace:   namespace,
@@ -430,6 +416,13 @@ func (r *SecretProviderClassPodStatusReconciler) createK8sSecret(ctx context.Con
 		return nil
 	}
 	if apierrors.IsAlreadyExists(err) {
+		klog.InfoS("Kubernetes secret is already created", "secret", klog.ObjectRef{Namespace: namespace, Name: name})
+		err := r.writer.Update(ctx, secret)
+		if err != nil {
+			klog.ErrorS(err, "Unable to update kubernetes secret", "secret", klog.ObjectRef{Namespace: namespace, Name: name})
+			return err
+		}
+		klog.InfoS("successfully updated Kubernetes secret", "secret", klog.ObjectRef{Namespace: namespace, Name: name})
 		return nil
 	}
 	return err
@@ -477,23 +470,6 @@ func (r *SecretProviderClassPodStatusReconciler) patchSecretWithOwnerRef(ctx con
 	return nil
 }
 
-// secretExists checks if the secret with name and namespace already exists
-func (r *SecretProviderClassPodStatusReconciler) secretExists(ctx context.Context, name, namespace string) (bool, error) {
-	o := &corev1.Secret{}
-	secretKey := types.NamespacedName{
-		Namespace: namespace,
-		Name:      name,
-	}
-	err := r.Client.Get(ctx, secretKey, o)
-	if err == nil {
-		return true, nil
-	}
-	if apierrors.IsNotFound(err) {
-		return false, nil
-	}
-	return false, err
-}
-
 // generateEvent generates an event
 func (r *SecretProviderClassPodStatusReconciler) generateEvent(obj apiruntime.Object, eventType, reason, message string) {
 	if obj != nil {
diff --git a/controllers/secretproviderclasspodstatus_controller_test.go b/controllers/secretproviderclasspodstatus_controller_test.go
index 11c4bab30..0558acee6 100644
--- a/controllers/secretproviderclasspodstatus_controller_test.go
+++ b/controllers/secretproviderclasspodstatus_controller_test.go
@@ -121,31 +121,6 @@ func newReconciler(client client.Client, scheme *runtime.Scheme, nodeID string)
 	}
 }
 
-func TestSecretExists(t *testing.T) {
-	g := NewWithT(t)
-
-	scheme, err := setupScheme()
-	g.Expect(err).NotTo(HaveOccurred())
-
-	labels := map[string]string{"environment": "test"}
-	annotations := map[string]string{"kubed.appscode.com/sync": "app=test"}
-
-	initObjects := []client.Object{
-		newSecret("my-secret", "default", labels, annotations),
-	}
-
-	client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-	reconciler := newReconciler(client, scheme, "node1")
-
-	exists, err := reconciler.secretExists(context.TODO(), "my-secret", "default")
-	g.Expect(exists).To(Equal(true))
-	g.Expect(err).NotTo(HaveOccurred())
-
-	exists, err = reconciler.secretExists(context.TODO(), "my-secret2", "default")
-	g.Expect(exists).To(Equal(false))
-	g.Expect(err).NotTo(HaveOccurred())
-}
-
 func TestPatchSecretWithOwnerRef(t *testing.T) {
 	g := NewWithT(t)
 
@@ -183,7 +158,7 @@ func TestPatchSecretWithOwnerRef(t *testing.T) {
 	g.Expect(secret.GetOwnerReferences()).To(HaveLen(1))
 }
 
-func TestCreateK8sSecret(t *testing.T) {
+func TestCreateOrUpdateK8sSecret(t *testing.T) {
 	g := NewWithT(t)
 
 	scheme, err := setupScheme()
@@ -198,11 +173,11 @@ func TestCreateK8sSecret(t *testing.T) {
 	client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
 	reconciler := newReconciler(client, scheme, "node1")
 
-	// secret already exists
-	err = reconciler.createK8sSecret(context.TODO(), "my-secret", "default", nil, labels, annotations, corev1.SecretTypeOpaque)
+	// secret already exists, just update it.
+	err = reconciler.createOrUpdateK8sSecret(context.TODO(), "my-secret", "default", nil, labels, annotations, corev1.SecretTypeOpaque)
 	g.Expect(err).NotTo(HaveOccurred())
 
-	err = reconciler.createK8sSecret(context.TODO(), "my-secret2", "default", nil, labels, annotations, corev1.SecretTypeOpaque)
+	err = reconciler.createOrUpdateK8sSecret(context.TODO(), "my-secret2", "default", nil, labels, annotations, corev1.SecretTypeOpaque)
 	g.Expect(err).NotTo(HaveOccurred())
 	secret := &corev1.Secret{}
 	err = client.Get(context.TODO(), types.NamespacedName{Name: "my-secret2", Namespace: "default"}, secret)
diff --git a/controllers/tokenrequest/tokenrequest.go b/controllers/tokenrequest/tokenrequest.go
deleted file mode 100644
index 2d3bc70c4..000000000
--- a/controllers/tokenrequest/tokenrequest.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package tokenrequest holds the RBAC permission annotations for the controller
-// to create a serviceaccount token and pass it as part of Mount Request.
-// ref: https://kubernetes-csi.github.io/docs/token-requests.html
-package tokenrequest
-
-// +kubebuilder:rbac:groups="",resources="serviceaccounts/token",verbs=create
diff --git a/go.mod b/go.mod
index 43767966a..3d0a367dc 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,6 @@ require (
 	k8s.io/client-go v0.26.4
 	k8s.io/klog/v2 v2.100.1
 	k8s.io/mount-utils v0.26.4
-	k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
 	monis.app/mlog v0.0.2
 	sigs.k8s.io/controller-runtime v0.14.6
 )
@@ -29,6 +28,7 @@ require (
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+	k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
 )
 
 require (
@@ -76,7 +76,7 @@ require (
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
 	golang.org/x/crypto v0.31.0
-	golang.org/x/net v0.24.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/oauth2 v0.7.0 // indirect
 	golang.org/x/sys v0.28.0 // indirect
 	golang.org/x/term v0.27.0 // indirect
diff --git a/go.sum b/go.sum
index ff036e2e6..ba8f74a17 100644
--- a/go.sum
+++ b/go.sum
@@ -553,8 +553,8 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index 2ac0acaa5..193184b6f 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -197,10 +197,10 @@ require (
 	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
 	golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect
 	golang.org/x/mod v0.20.0 // indirect
-	golang.org/x/net v0.28.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.24.0 // indirect
-	golang.org/x/text v0.17.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/tools v0.24.0 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 4a380ac30..926e7e24d 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -733,8 +733,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -758,8 +758,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -819,8 +819,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -838,8 +838,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/manifest_staging/charts/secrets-store-csi-driver/templates/csidriver.yaml b/manifest_staging/charts/secrets-store-csi-driver/templates/csidriver.yaml
index 5aef2b946..830b5be96 100644
--- a/manifest_staging/charts/secrets-store-csi-driver/templates/csidriver.yaml
+++ b/manifest_staging/charts/secrets-store-csi-driver/templates/csidriver.yaml
@@ -11,6 +11,7 @@ spec:
   volumeLifecycleModes: 
   - Ephemeral
   {{- if and (semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version) .Values.tokenRequests }}
+  requiresRepublish: true
   tokenRequests:
     {{- toYaml .Values.tokenRequests | nindent 2 }}
   {{- end }}
diff --git a/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation.yaml b/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation.yaml
deleted file mode 100644
index 64bbf28fa..000000000
--- a/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-{{ if .Values.enableSecretRotation }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: secretproviderrotation-role
-  labels:
-{{ include "sscd.labels" . | indent 4 }}
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-  - watch
-{{ end }}
diff --git a/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation_binding.yaml b/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation_binding.yaml
deleted file mode 100644
index ae7908e16..000000000
--- a/manifest_staging/charts/secrets-store-csi-driver/templates/role-rotation_binding.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{{ if .Values.enableSecretRotation }}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: secretproviderrotation-rolebinding
-  labels:
-{{ include "sscd.labels" . | indent 4 }}
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: secretproviderrotation-role
-subjects:
-- kind: ServiceAccount
-  name: secrets-store-csi-driver
-  namespace: {{ .Release.Namespace }}
-{{ end }}
diff --git a/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest.yaml b/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest.yaml
deleted file mode 100644
index f81594ea0..000000000
--- a/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{{ if .Values.tokenRequests }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: secretprovidertokenrequest-role
-  labels:
-{{ include "sscd.labels" . | indent 4 }}
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - serviceaccounts/token
-  verbs:
-  - create
-{{ end }}
diff --git a/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest_binding.yaml b/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest_binding.yaml
deleted file mode 100644
index 76abcb28b..000000000
--- a/manifest_staging/charts/secrets-store-csi-driver/templates/role-tokenrequest_binding.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{{ if .Values.tokenRequests }}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: secretprovidertokenrequest-rolebinding
-  labels:
-{{ include "sscd.labels" . | indent 4 }}
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: secretprovidertokenrequest-role
-subjects:
-- kind: ServiceAccount
-  name: secrets-store-csi-driver
-  namespace: {{ .Release.Namespace }}
-{{ end }}
diff --git a/manifest_staging/deploy/csidriver.yaml b/manifest_staging/deploy/csidriver.yaml
index ac3445923..19b95c1cc 100644
--- a/manifest_staging/deploy/csidriver.yaml
+++ b/manifest_staging/deploy/csidriver.yaml
@@ -7,3 +7,4 @@ spec:
   attachRequired: false
   volumeLifecycleModes:
   - Ephemeral
+  requiresRepublish: true
diff --git a/manifest_staging/deploy/rbac-secretproviderrotation.yaml b/manifest_staging/deploy/rbac-secretproviderrotation.yaml
deleted file mode 100644
index 24ecde822..000000000
--- a/manifest_staging/deploy/rbac-secretproviderrotation.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: secretproviderrotation-role
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-  - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: secretproviderrotation-rolebinding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: secretproviderrotation-role
-subjects:
-- kind: ServiceAccount
-  name: secrets-store-csi-driver
-  namespace: kube-system
diff --git a/manifest_staging/deploy/rbac-secretprovidertokenrequest.yaml b/manifest_staging/deploy/rbac-secretprovidertokenrequest.yaml
deleted file mode 100644
index b97ff3d55..000000000
--- a/manifest_staging/deploy/rbac-secretprovidertokenrequest.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: secretprovidertokenrequest-role
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - serviceaccounts/token
-  verbs:
-  - create
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: secretprovidertokenrequest-rolebinding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: secretprovidertokenrequest-role
-subjects:
-- kind: ServiceAccount
-  name: secrets-store-csi-driver
-  namespace: kube-system
diff --git a/pkg/k8s/secret.go b/pkg/k8s/secret.go
deleted file mode 100644
index 886e93c31..000000000
--- a/pkg/k8s/secret.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package k8s
-
-import (
-	"fmt"
-
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/tools/cache"
-)
-
-// SecretLister is a store used to list secrets
-type SecretLister struct {
-	cache.Store
-}
-
-// GetWithKey returns secret with key from the informer cache
-func (sl *SecretLister) GetWithKey(key string) (*corev1.Secret, error) {
-	sec, exists, err := sl.GetByKey(key)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, apierrors.NewNotFound(schema.GroupResource{Group: corev1.GroupName, Resource: "secrets"}, key)
-	}
-	secret, ok := sec.(*corev1.Secret)
-	if !ok {
-		return nil, fmt.Errorf("failed to cast %T to %s", sec, "secret")
-	}
-	return secret, nil
-}
diff --git a/pkg/k8s/store.go b/pkg/k8s/store.go
deleted file mode 100644
index 684d131ef..000000000
--- a/pkg/k8s/store.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package k8s
-
-import (
-	"fmt"
-	"time"
-
-	"sigs.k8s.io/secrets-store-csi-driver/controllers"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	coreInformers "k8s.io/client-go/informers/core/v1"
-	"k8s.io/client-go/informers/internalinterfaces"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-)
-
-// Informer holds the shared index informers
-type Informer struct {
-	NodePublishSecretRefSecret cache.SharedIndexInformer
-}
-
-// Lister holds the object lister
-type Lister struct {
-	NodePublishSecretRefSecret SecretLister
-}
-
-// Store for secrets with label 'secrets-store.csi.k8s.io/used'
-type Store interface {
-	// GetNodePublishSecretRefSecret returns the NodePublishSecretRef secret matching name and namespace
-	GetNodePublishSecretRefSecret(name, namespace string) (*corev1.Secret, error)
-	// Run initializes and runs the informers
-	Run(stopCh <-chan struct{}) error
-}
-
-type k8sStore struct {
-	informers *Informer
-	listers   *Lister
-}
-
-// New returns store.Store for NodePublishSecretRefSecret
-func New(kubeClient kubernetes.Interface, resyncPeriod time.Duration) (Store, error) {
-	store := &k8sStore{
-		informers: &Informer{},
-		listers:   &Lister{},
-	}
-
-	store.informers.NodePublishSecretRefSecret = newNodePublishSecretRefSecretInformer(kubeClient, resyncPeriod)
-	store.listers.NodePublishSecretRefSecret.Store = store.informers.NodePublishSecretRefSecret.GetStore()
-
-	return store, nil
-}
-
-// Run initiates the sync of the informers and caches
-func (s k8sStore) Run(stopCh <-chan struct{}) error {
-	return s.informers.run(stopCh)
-}
-
-// GetNodePublishSecretRefSecret returns the NodePublishSecretRef secret matching name and namespace
-func (s k8sStore) GetNodePublishSecretRefSecret(name, namespace string) (*corev1.Secret, error) {
-	return s.listers.NodePublishSecretRefSecret.GetWithKey(fmt.Sprintf("%s/%s", namespace, name))
-}
-
-func (i *Informer) run(stopCh <-chan struct{}) error {
-	go i.NodePublishSecretRefSecret.Run(stopCh)
-
-	synced := []cache.InformerSynced{
-		i.NodePublishSecretRefSecret.HasSynced,
-	}
-	if !cache.WaitForCacheSync(stopCh, synced...) {
-		return fmt.Errorf("failed to sync informer caches")
-	}
-	return nil
-}
-
-// newNodePublishSecretRefSecretInformer returns a NodePublishSecretRef informer
-func newNodePublishSecretRefSecretInformer(kubeClient kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return coreInformers.NewFilteredSecretInformer(
-		kubeClient,
-		corev1.NamespaceAll,
-		resyncPeriod,
-		cache.Indexers{},
-		usedFilterForSecret(),
-	)
-}
-
-// usedFilterForSecret returns tweak options to filter using used label (secrets-store.csi.k8s.io/used=true).
-// this label will need to be configured by user for NodePublishSecretRef secrets.
-func usedFilterForSecret() internalinterfaces.TweakListOptionsFunc {
-	return func(options *metav1.ListOptions) {
-		options.LabelSelector = fmt.Sprintf("%s=true", controllers.SecretUsedLabel)
-	}
-}
diff --git a/pkg/k8s/store_test.go b/pkg/k8s/store_test.go
deleted file mode 100644
index 28d4fe7ac..000000000
--- a/pkg/k8s/store_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package k8s
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"sigs.k8s.io/secrets-store-csi-driver/controllers"
-
-	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes/fake"
-)
-
-func TestGetNodePublishSecretRefSecret(t *testing.T) {
-	g := NewWithT(t)
-
-	kubeClient := fake.NewSimpleClientset()
-
-	testStore, err := New(kubeClient, 1*time.Millisecond)
-	g.Expect(err).NotTo(HaveOccurred())
-	err = testStore.Run(wait.NeverStop)
-	g.Expect(err).NotTo(HaveOccurred())
-
-	// Get a secret that's not found
-	_, err = testStore.GetNodePublishSecretRefSecret("secret1", "default")
-	g.Expect(err).To(HaveOccurred())
-	g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
-
-	secretToAdd := &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "secret1",
-			Namespace: "default",
-			Labels: map[string]string{
-				controllers.SecretUsedLabel: "true",
-			},
-		},
-	}
-
-	_, err = kubeClient.CoreV1().Secrets("default").Create(context.TODO(), secretToAdd, metav1.CreateOptions{})
-	g.Expect(err).NotTo(HaveOccurred())
-
-	waitForInformerCacheSync()
-
-	secret, err := testStore.GetNodePublishSecretRefSecret("secret1", "default")
-	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(secret).NotTo(BeNil())
-	g.Expect(secret.Name).To(Equal("secret1"))
-}
-
-// waitForInformerCacheSync waits for the test informers cache to be synced
-func waitForInformerCacheSync() {
-	time.Sleep(200 * time.Millisecond)
-}
diff --git a/pkg/k8s/token.go b/pkg/k8s/token.go
deleted file mode 100644
index 047b7ef4d..000000000
--- a/pkg/k8s/token.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package k8s
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-
-	authenticationv1 "k8s.io/api/authentication/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	kubeinformers "k8s.io/client-go/informers"
-	storageinformers "k8s.io/client-go/informers/storage/v1"
-	"k8s.io/client-go/kubernetes"
-	storagelisters "k8s.io/client-go/listers/storage/v1"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/klog/v2"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s/token"
-)
-
-// TokenClient is a client for Kubernetes Token API
-type TokenClient struct {
-	driverName        string
-	csiDriverInformer storageinformers.CSIDriverInformer
-	csiDriverLister   storagelisters.CSIDriverLister
-	manager           *token.Manager
-}
-
-// NewTokenClient creates a new TokenClient
-// The client will be used to request a token for token requests configured in the CSIDriver.
-func NewTokenClient(kubeClient kubernetes.Interface, driverName string, resyncPeriod time.Duration) *TokenClient {
-	kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
-		kubeClient,
-		resyncPeriod,
-		kubeinformers.WithTweakListOptions(
-			func(options *metav1.ListOptions) {
-				options.FieldSelector = fmt.Sprintf("metadata.name=%s", driverName)
-			},
-		),
-	)
-
-	csiDriverInformer := kubeInformerFactory.Storage().V1().CSIDrivers()
-	csiDriverLister := csiDriverInformer.Lister()
-
-	return &TokenClient{
-		driverName:        driverName,
-		csiDriverInformer: csiDriverInformer,
-		csiDriverLister:   csiDriverLister,
-		manager:           token.NewManager(kubeClient),
-	}
-}
-
-// Run initiates the sync of the informers and caches
-func (c *TokenClient) Run(stopCh <-chan struct{}) error {
-	go c.csiDriverInformer.Informer().Run(stopCh)
-
-	if !cache.WaitForCacheSync(stopCh, c.csiDriverInformer.Informer().HasSynced) {
-		return fmt.Errorf("failed to sync informer caches")
-	}
-	return nil
-}
-
-// PodServiceAccountTokenAttrs returns the token for the pod service account that can be bound to the pod.
-// This token will be sent to the providers and is of the format:
-//
-//	"csi.storage.k8s.io/serviceAccount.tokens": {
-//	  <audience>: {
-//	    'token': <token>,
-//	    'expirationTimestamp': <expiration timestamp in RFC3339 format>,
-//	  },
-//	  ...
-//	}
-//
-// ref: https://kubernetes-csi.github.io/docs/token-requests.html#usage
-func (c *TokenClient) PodServiceAccountTokenAttrs(namespace, podName, serviceAccountName string, podUID types.UID) (map[string]string, error) {
-	csiDriver, err := c.csiDriverLister.Get(c.driverName)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			klog.V(5).InfoS("CSIDriver not found, not adding service account token information", "driver", c.driverName)
-			return nil, nil
-		}
-		return nil, err
-	}
-
-	if len(csiDriver.Spec.TokenRequests) == 0 {
-		return nil, nil
-	}
-
-	outputs := map[string]authenticationv1.TokenRequestStatus{}
-	for _, tokenRequest := range csiDriver.Spec.TokenRequests {
-		audience := tokenRequest.Audience
-		audiences := []string{audience}
-		if audience == "" {
-			audiences = []string{}
-		}
-		tr := &authenticationv1.TokenRequest{
-			Spec: authenticationv1.TokenRequestSpec{
-				ExpirationSeconds: tokenRequest.ExpirationSeconds,
-				Audiences:         audiences,
-				BoundObjectRef: &authenticationv1.BoundObjectReference{
-					APIVersion: "v1",
-					Kind:       "Pod",
-					Name:       podName,
-					UID:        podUID,
-				},
-			},
-		}
-
-		tr, err := c.GetServiceAccountToken(namespace, serviceAccountName, tr)
-		if err != nil {
-			return nil, err
-		}
-		outputs[audience] = tr.Status
-	}
-
-	klog.V(4).InfoS("Fetched service account token attrs for CSIDriver", "driver", c.driverName, "podUID", podUID)
-	tokens, err := json.Marshal(outputs)
-	if err != nil {
-		return nil, err
-	}
-
-	return map[string]string{
-		"csi.storage.k8s.io/serviceAccount.tokens": string(tokens),
-	}, nil
-}
-
-// GetServiceAccountToken gets a service account token for a pod from cache or
-// from the TokenRequest API. This process is as follows:
-// * Check the cache for the current token request.
-// * If the token exists and does not require a refresh, return the current token.
-// * Attempt to refresh the token.
-// * If the token is refreshed successfully, save it in the cache and return the token.
-// * If refresh fails and the old token is still valid, log an error and return the old token.
-// * If refresh fails and the old token is no longer valid, return an error
-func (c *TokenClient) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-	return c.manager.GetServiceAccountToken(namespace, name, tr)
-}
-
-// DeleteServiceAccountToken should be invoked when pod got deleted. It simply
-// clean token manager cache.
-func (c *TokenClient) DeleteServiceAccountToken(podUID types.UID) {
-	c.manager.DeleteServiceAccountToken(podUID)
-}
diff --git a/pkg/k8s/token/token_manager.go b/pkg/k8s/token/token_manager.go
deleted file mode 100644
index fde7237c8..000000000
--- a/pkg/k8s/token/token_manager.go
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Vendored from kubernetes/pkg/kubelet/token/token_manager.go
-//  * tag: v1.25.3,
-//  * commit: 53ce79a18ab2665488f7c55c6a1cab8e7a09aced
-//  * link: https://github.com/kubernetes/kubernetes/blob/53ce79a18ab2665488f7c55c6a1cab8e7a09aced/pkg/kubelet/token/token_manager.go
-
-// Package token implements a manager of serviceaccount tokens for pods running
-// on the node.
-package token
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"math/rand"
-	"sync"
-	"time"
-
-	authenticationv1 "k8s.io/api/authentication/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog/v2"
-	"k8s.io/utils/clock"
-)
-
-const (
-	maxTTL    = 24 * time.Hour
-	gcPeriod  = time.Minute
-	maxJitter = 10 * time.Second
-)
-
-// NewManager returns a new token manager.
-func NewManager(c clientset.Interface) *Manager {
-	// check whether the server supports token requests so we can give a more helpful error message
-	supported := false
-	once := &sync.Once{}
-	tokenRequestsSupported := func() bool {
-		once.Do(func() {
-			resources, err := c.Discovery().ServerResourcesForGroupVersion("v1")
-			if err != nil {
-				return
-			}
-			for idx := range resources.APIResources {
-				resource := &resources.APIResources[idx]
-				if resource.Name == "serviceaccounts/token" {
-					supported = true
-					return
-				}
-			}
-		})
-		return supported
-	}
-
-	m := &Manager{
-		getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-			if c == nil {
-				return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode")
-			}
-			tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
-			if apierrors.IsNotFound(err) && !tokenRequestsSupported() {
-				return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled")
-			}
-			return tokenRequest, err
-		},
-		cache: make(map[string]*authenticationv1.TokenRequest),
-		clock: clock.RealClock{},
-	}
-	go wait.Forever(m.cleanup, gcPeriod)
-	return m
-}
-
-// Manager manages service account tokens for pods.
-type Manager struct {
-
-	// cacheMutex guards the cache
-	cacheMutex sync.RWMutex
-	cache      map[string]*authenticationv1.TokenRequest
-
-	// mocked for testing
-	getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
-	clock    clock.Clock
-}
-
-// GetServiceAccountToken gets a service account token for a pod from cache or
-// from the TokenRequest API. This process is as follows:
-// * Check the cache for the current token request.
-// * If the token exists and does not require a refresh, return the current token.
-// * Attempt to refresh the token.
-// * If the token is refreshed successfully, save it in the cache and return the token.
-// * If refresh fails and the old token is still valid, log an error and return the old token.
-// * If refresh fails and the old token is no longer valid, return an error
-func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-	key := keyFunc(name, namespace, tr)
-
-	ctr, ok := m.get(key)
-
-	if ok && !m.requiresRefresh(ctr) {
-		return ctr, nil
-	}
-
-	tr, err := m.getToken(name, namespace, tr)
-	if err != nil {
-		switch {
-		case !ok:
-			return nil, fmt.Errorf("failed to fetch token: %w", err)
-		case m.expired(ctr):
-			return nil, fmt.Errorf("token %s expired and refresh failed: %w", key, err)
-		default:
-			klog.ErrorS(err, "Couldn't update token", "cacheKey", key)
-			return ctr, nil
-		}
-	}
-
-	m.set(key, tr)
-	return tr, nil
-}
-
-// DeleteServiceAccountToken should be invoked when pod got deleted. It simply
-// clean token manager cache.
-func (m *Manager) DeleteServiceAccountToken(podUID types.UID) {
-	m.cacheMutex.Lock()
-	defer m.cacheMutex.Unlock()
-	for k, tr := range m.cache {
-		if tr.Spec.BoundObjectRef.UID == podUID {
-			delete(m.cache, k)
-		}
-	}
-}
-
-func (m *Manager) cleanup() {
-	m.cacheMutex.Lock()
-	defer m.cacheMutex.Unlock()
-	for k, tr := range m.cache {
-		if m.expired(tr) {
-			delete(m.cache, k)
-		}
-	}
-}
-
-func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
-	m.cacheMutex.RLock()
-	defer m.cacheMutex.RUnlock()
-	ctr, ok := m.cache[key]
-	return ctr, ok
-}
-
-func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
-	m.cacheMutex.Lock()
-	defer m.cacheMutex.Unlock()
-	m.cache[key] = tr
-}
-
-func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
-	return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
-}
-
-// requiresRefresh returns true if the token is older than 80% of its total
-// ttl, or if the token is older than 24 hours.
-func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
-	if tr.Spec.ExpirationSeconds == nil {
-		cpy := tr.DeepCopy()
-		cpy.Status.Token = ""
-		klog.ErrorS(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
-		return false
-	}
-	now := m.clock.Now()
-	exp := tr.Status.ExpirationTimestamp.Time
-	iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
-
-	// #nosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
-	jitter := time.Duration(rand.Float64()*maxJitter.Seconds()) * time.Second
-	if now.After(iat.Add(maxTTL - jitter)) {
-		return true
-	}
-	// Require a refresh if within 20% of the TTL plus a jitter from the expiration time.
-	if now.After(exp.Add(-1*time.Duration((*tr.Spec.ExpirationSeconds*20)/100)*time.Second - jitter)) {
-		return true
-	}
-	return false
-}
-
-// keys should be nonconfidential and safe to log
-func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
-	var exp int64
-	if tr.Spec.ExpirationSeconds != nil {
-		exp = *tr.Spec.ExpirationSeconds
-	}
-
-	var ref authenticationv1.BoundObjectReference
-	if tr.Spec.BoundObjectRef != nil {
-		ref = *tr.Spec.BoundObjectRef
-	}
-
-	return fmt.Sprintf("%q/%q/%#v/%#v/%#v", name, namespace, tr.Spec.Audiences, exp, ref)
-}
diff --git a/pkg/k8s/token/token_manager_test.go b/pkg/k8s/token/token_manager_test.go
deleted file mode 100644
index 1de6e8c80..000000000
--- a/pkg/k8s/token/token_manager_test.go
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Vendored from kubernetes/pkg/kubelet/token/token_manager_test.go
-//  * tag: v1.25.3,
-//  * commit: 53ce79a18ab2665488f7c55c6a1cab8e7a09aced
-//  * link: https://github.com/kubernetes/kubernetes/blob/53ce79a18ab2665488f7c55c6a1cab8e7a09aced/pkg/kubelet/token/token_manager_test.go
-
-package token
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	authenticationv1 "k8s.io/api/authentication/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	testingclock "k8s.io/utils/clock/testing"
-)
-
-func TestTokenCachingAndExpiration(t *testing.T) {
-	type suite struct {
-		clock *testingclock.FakeClock
-		tg    *fakeTokenGetter
-		mgr   *Manager
-	}
-
-	cases := []struct {
-		name string
-		exp  time.Duration
-		f    func(t *testing.T, s *suite)
-	}{
-		{
-			name: "rotate hour token expires in the last 12 minutes",
-			exp:  time.Hour,
-			f: func(t *testing.T, s *suite) {
-				s.clock.SetTime(s.clock.Now().Add(50 * time.Minute))
-				if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
-					t.Fatalf("unexpected error: %v", err)
-				}
-				if s.tg.count != 2 {
-					t.Fatalf("expected token to be refreshed: call count was %d", s.tg.count)
-				}
-			},
-		},
-		{
-			name: "rotate 24 hour token that expires in 40 hours",
-			exp:  40 * time.Hour,
-			f: func(t *testing.T, s *suite) {
-				s.clock.SetTime(s.clock.Now().Add(25 * time.Hour))
-				if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
-					t.Fatalf("unexpected error: %v", err)
-				}
-				if s.tg.count != 2 {
-					t.Fatalf("expected token to be refreshed: call count was %d", s.tg.count)
-				}
-			},
-		},
-		{
-			name: "rotate hour token fails, old token is still valid, doesn't error",
-			exp:  time.Hour,
-			f: func(t *testing.T, s *suite) {
-				s.clock.SetTime(s.clock.Now().Add(50 * time.Minute))
-				tg := &fakeTokenGetter{
-					err: fmt.Errorf("err"),
-				}
-				s.mgr.getToken = tg.getToken
-				tr, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest())
-				if err != nil {
-					t.Fatalf("unexpected error: %v", err)
-				}
-				if tr.Status.Token != "foo" {
-					t.Fatalf("unexpected token: %v", tr.Status.Token)
-				}
-			},
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			clock := testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
-			expSecs := int64(c.exp.Seconds())
-			s := &suite{
-				clock: clock,
-				mgr:   NewManager(nil),
-				tg: &fakeTokenGetter{
-					tr: &authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							ExpirationSeconds: &expSecs,
-						},
-						Status: authenticationv1.TokenRequestStatus{
-							Token:               "foo",
-							ExpirationTimestamp: metav1.Time{Time: clock.Now().Add(c.exp)},
-						},
-					},
-				},
-			}
-			s.mgr.getToken = s.tg.getToken
-			s.mgr.clock = s.clock
-			if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if s.tg.count != 1 {
-				t.Fatalf("unexpected client call, got: %d, want: 1", s.tg.count)
-			}
-
-			if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if s.tg.count != 1 {
-				t.Fatalf("expected token to be served from cache: saw %d", s.tg.count)
-			}
-
-			c.f(t, s)
-		})
-	}
-}
-
-func TestRequiresRefresh(t *testing.T) {
-	start := time.Now()
-	cases := []struct {
-		now, exp      time.Time
-		expectRefresh bool
-		requestTweaks func(*authenticationv1.TokenRequest)
-	}{
-		{
-			now:           start.Add(10 * time.Minute),
-			exp:           start.Add(60 * time.Minute),
-			expectRefresh: false,
-		},
-		{
-			now:           start.Add(50 * time.Minute),
-			exp:           start.Add(60 * time.Minute),
-			expectRefresh: true,
-		},
-		{
-			now:           start.Add(25 * time.Hour),
-			exp:           start.Add(60 * time.Hour),
-			expectRefresh: true,
-		},
-		{
-			now:           start.Add(70 * time.Minute),
-			exp:           start.Add(60 * time.Minute),
-			expectRefresh: true,
-		},
-		{
-			// expiry will be overwritten by the tweak below.
-			now:           start.Add(0 * time.Minute),
-			exp:           start.Add(60 * time.Minute),
-			expectRefresh: false,
-			requestTweaks: func(tr *authenticationv1.TokenRequest) {
-				tr.Spec.ExpirationSeconds = nil
-			},
-		},
-	}
-
-	for i, c := range cases {
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
-			clock := testingclock.NewFakeClock(c.now)
-			secs := int64(c.exp.Sub(start).Seconds())
-			tr := &authenticationv1.TokenRequest{
-				Spec: authenticationv1.TokenRequestSpec{
-					ExpirationSeconds: &secs,
-				},
-				Status: authenticationv1.TokenRequestStatus{
-					ExpirationTimestamp: metav1.Time{Time: c.exp},
-				},
-			}
-
-			if c.requestTweaks != nil {
-				c.requestTweaks(tr)
-			}
-
-			mgr := NewManager(nil)
-			mgr.clock = clock
-
-			rr := mgr.requiresRefresh(tr)
-			if rr != c.expectRefresh {
-				t.Fatalf("unexpected requiresRefresh result, got: %v, want: %v", rr, c.expectRefresh)
-			}
-		})
-	}
-}
-
-func TestDeleteServiceAccountToken(t *testing.T) {
-	type request struct {
-		name, namespace string
-		tr              authenticationv1.TokenRequest
-		shouldFail      bool
-	}
-
-	cases := []struct {
-		name         string
-		requestIndex []int
-		deletePodUID []types.UID
-		expLeftIndex []int
-	}{
-		{
-			name:         "delete none with all success requests",
-			requestIndex: []int{0, 1, 2},
-			expLeftIndex: []int{0, 1, 2},
-		},
-		{
-			name:         "delete one with all success requests",
-			requestIndex: []int{0, 1, 2},
-			deletePodUID: []types.UID{"fake-uid-1"},
-			expLeftIndex: []int{1, 2},
-		},
-		{
-			name:         "delete two with all success requests",
-			requestIndex: []int{0, 1, 2},
-			deletePodUID: []types.UID{"fake-uid-1", "fake-uid-3"},
-			expLeftIndex: []int{1},
-		},
-		{
-			name:         "delete all with all suceess requests",
-			requestIndex: []int{0, 1, 2},
-			deletePodUID: []types.UID{"fake-uid-1", "fake-uid-2", "fake-uid-3"},
-		},
-		{
-			name:         "delete no pod with failed requests",
-			requestIndex: []int{0, 1, 2, 3},
-			deletePodUID: []types.UID{},
-			expLeftIndex: []int{0, 1, 2},
-		},
-		{
-			name:         "delete other pod with failed requests",
-			requestIndex: []int{0, 1, 2, 3},
-			deletePodUID: []types.UID{"fake-uid-2"},
-			expLeftIndex: []int{0, 2},
-		},
-		{
-			name:         "delete no pod with request which success after failure",
-			requestIndex: []int{0, 1, 2, 3, 4},
-			deletePodUID: []types.UID{},
-			expLeftIndex: []int{0, 1, 2, 4},
-		},
-		{
-			name:         "delete the pod which success after failure",
-			requestIndex: []int{0, 1, 2, 3, 4},
-			deletePodUID: []types.UID{"fake-uid-4"},
-			expLeftIndex: []int{0, 1, 2},
-		},
-		{
-			name:         "delete other pod with request which success after failure",
-			requestIndex: []int{0, 1, 2, 3, 4},
-			deletePodUID: []types.UID{"fake-uid-1"},
-			expLeftIndex: []int{1, 2, 4},
-		},
-		{
-			name:         "delete some pod not in the set",
-			requestIndex: []int{0, 1, 2},
-			deletePodUID: []types.UID{"fake-uid-100", "fake-uid-200"},
-			expLeftIndex: []int{0, 1, 2},
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			requests := []request{
-				{
-					name:      "fake-name-1",
-					namespace: "fake-namespace-1",
-					tr: authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								UID:  "fake-uid-1",
-								Name: "fake-name-1",
-							},
-						},
-					},
-					shouldFail: false,
-				},
-				{
-					name:      "fake-name-2",
-					namespace: "fake-namespace-2",
-					tr: authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								UID:  "fake-uid-2",
-								Name: "fake-name-2",
-							},
-						},
-					},
-					shouldFail: false,
-				},
-				{
-					name:      "fake-name-3",
-					namespace: "fake-namespace-3",
-					tr: authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								UID:  "fake-uid-3",
-								Name: "fake-name-3",
-							},
-						},
-					},
-					shouldFail: false,
-				},
-				{
-					name:      "fake-name-4",
-					namespace: "fake-namespace-4",
-					tr: authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								UID:  "fake-uid-4",
-								Name: "fake-name-4",
-							},
-						},
-					},
-					shouldFail: true,
-				},
-				{
-					// exactly the same with last one, besides it will success
-					name:      "fake-name-4",
-					namespace: "fake-namespace-4",
-					tr: authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								UID:  "fake-uid-4",
-								Name: "fake-name-4",
-							},
-						},
-					},
-					shouldFail: false,
-				},
-			}
-			testMgr := NewManager(nil)
-			testMgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
-
-			successGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-				tr.Status = authenticationv1.TokenRequestStatus{
-					ExpirationTimestamp: metav1.Time{Time: testMgr.clock.Now().Add(10 * time.Hour)},
-				}
-				return tr, nil
-			}
-			failGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-				return nil, fmt.Errorf("fail tr")
-			}
-
-			for _, index := range c.requestIndex {
-				req := requests[index]
-				if req.shouldFail {
-					testMgr.getToken = failGetToken
-				} else {
-					testMgr.getToken = successGetToken
-				}
-				_, _ = testMgr.GetServiceAccountToken(req.namespace, req.name, &req.tr)
-			}
-
-			for _, uid := range c.deletePodUID {
-				testMgr.DeleteServiceAccountToken(uid)
-			}
-			if len(c.expLeftIndex) != len(testMgr.cache) {
-				t.Errorf("%s got unexpected result: expected left cache size is %d, got %d", c.name, len(c.expLeftIndex), len(testMgr.cache))
-			}
-			for _, leftIndex := range c.expLeftIndex {
-				r := requests[leftIndex]
-				_, ok := testMgr.get(keyFunc(r.name, r.namespace, &r.tr))
-				if !ok {
-					t.Errorf("%s got unexpected result: expected token request %v exist in cache, but not", c.name, r)
-				}
-			}
-		})
-	}
-}
-
-type fakeTokenGetter struct {
-	count int
-	tr    *authenticationv1.TokenRequest
-	err   error
-}
-
-func (ftg *fakeTokenGetter) getToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-	ftg.count++
-	return ftg.tr, ftg.err
-}
-
-func TestCleanup(t *testing.T) {
-	cases := []struct {
-		name              string
-		relativeExp       time.Duration
-		expectedCacheSize int
-	}{
-		{
-			name:              "don't cleanup unexpired tokens",
-			relativeExp:       -1 * time.Hour,
-			expectedCacheSize: 0,
-		},
-		{
-			name:              "cleanup expired tokens",
-			relativeExp:       time.Hour,
-			expectedCacheSize: 1,
-		},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			clock := testingclock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
-			mgr := NewManager(nil)
-			mgr.clock = clock
-
-			mgr.set("key", &authenticationv1.TokenRequest{
-				Status: authenticationv1.TokenRequestStatus{
-					ExpirationTimestamp: metav1.Time{Time: mgr.clock.Now().Add(c.relativeExp)},
-				},
-			})
-			mgr.cleanup()
-			if got, want := len(mgr.cache), c.expectedCacheSize; got != want {
-				t.Fatalf("unexpected number of cache entries after cleanup, got: %d, want: %d", got, want)
-			}
-		})
-	}
-}
-
-func TestKeyFunc(t *testing.T) {
-	type tokenRequestUnit struct {
-		name      string
-		namespace string
-		tr        *authenticationv1.TokenRequest
-	}
-	getKeyFunc := func(u tokenRequestUnit) string {
-		return keyFunc(u.name, u.namespace, u.tr)
-	}
-
-	cases := []struct {
-		name   string
-		trus   []tokenRequestUnit
-		target tokenRequestUnit
-
-		shouldHit bool
-	}{
-		{
-			name: "hit",
-			trus: []tokenRequestUnit{
-				{
-					name:      "foo-sa",
-					namespace: "foo-ns",
-					tr: &authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							Audiences:         []string{"foo1", "foo2"},
-							ExpirationSeconds: getInt64Point(2000),
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								Kind: "pod",
-								Name: "foo-pod",
-								UID:  "foo-uid",
-							},
-						},
-					},
-				},
-				{
-					name:      "ame-sa",
-					namespace: "ame-ns",
-					tr: &authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							Audiences:         []string{"ame1", "ame2"},
-							ExpirationSeconds: getInt64Point(2000),
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								Kind: "pod",
-								Name: "ame-pod",
-								UID:  "ame-uid",
-							},
-						},
-					},
-				},
-			},
-			target: tokenRequestUnit{
-				name:      "foo-sa",
-				namespace: "foo-ns",
-				tr: &authenticationv1.TokenRequest{
-					Spec: authenticationv1.TokenRequestSpec{
-						Audiences:         []string{"foo1", "foo2"},
-						ExpirationSeconds: getInt64Point(2000),
-						BoundObjectRef: &authenticationv1.BoundObjectReference{
-							Kind: "pod",
-							Name: "foo-pod",
-							UID:  "foo-uid",
-						},
-					},
-				},
-			},
-			shouldHit: true,
-		},
-		{
-			name: "not hit due to different ExpirationSeconds",
-			trus: []tokenRequestUnit{
-				{
-					name:      "foo-sa",
-					namespace: "foo-ns",
-					tr: &authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							Audiences:         []string{"foo1", "foo2"},
-							ExpirationSeconds: getInt64Point(2000),
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								Kind: "pod",
-								Name: "foo-pod",
-								UID:  "foo-uid",
-							},
-						},
-					},
-				},
-			},
-			target: tokenRequestUnit{
-				name:      "foo-sa",
-				namespace: "foo-ns",
-				tr: &authenticationv1.TokenRequest{
-					Spec: authenticationv1.TokenRequestSpec{
-						Audiences: []string{"foo1", "foo2"},
-						// everything is same besides ExpirationSeconds
-						ExpirationSeconds: getInt64Point(2001),
-						BoundObjectRef: &authenticationv1.BoundObjectReference{
-							Kind: "pod",
-							Name: "foo-pod",
-							UID:  "foo-uid",
-						},
-					},
-				},
-			},
-			shouldHit: false,
-		},
-		{
-			name: "not hit due to different BoundObjectRef",
-			trus: []tokenRequestUnit{
-				{
-					name:      "foo-sa",
-					namespace: "foo-ns",
-					tr: &authenticationv1.TokenRequest{
-						Spec: authenticationv1.TokenRequestSpec{
-							Audiences:         []string{"foo1", "foo2"},
-							ExpirationSeconds: getInt64Point(2000),
-							BoundObjectRef: &authenticationv1.BoundObjectReference{
-								Kind: "pod",
-								Name: "foo-pod",
-								UID:  "foo-uid",
-							},
-						},
-					},
-				},
-			},
-			target: tokenRequestUnit{
-				name:      "foo-sa",
-				namespace: "foo-ns",
-				tr: &authenticationv1.TokenRequest{
-					Spec: authenticationv1.TokenRequestSpec{
-						Audiences:         []string{"foo1", "foo2"},
-						ExpirationSeconds: getInt64Point(2000),
-						BoundObjectRef: &authenticationv1.BoundObjectReference{
-							Kind: "pod",
-							// everything is same besides BoundObjectRef.Name
-							Name: "diff-pod",
-							UID:  "foo-uid",
-						},
-					},
-				},
-			},
-			shouldHit: false,
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			mgr := NewManager(nil)
-			mgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
-			for _, tru := range c.trus {
-				mgr.set(getKeyFunc(tru), &authenticationv1.TokenRequest{
-					Status: authenticationv1.TokenRequestStatus{
-						// make sure the token cache would not be cleaned by token manager cleanup func
-						ExpirationTimestamp: metav1.Time{Time: mgr.clock.Now().Add(50 * time.Minute)},
-					},
-				})
-			}
-			_, hit := mgr.get(getKeyFunc(c.target))
-
-			if hit != c.shouldHit {
-				t.Errorf("%s got unexpected hit result: expected to be %t, got %t", c.name, c.shouldHit, hit)
-			}
-		})
-	}
-}
-
-func getTokenRequest() *authenticationv1.TokenRequest {
-	return &authenticationv1.TokenRequest{
-		Spec: authenticationv1.TokenRequestSpec{
-			Audiences:         []string{"foo1", "foo2"},
-			ExpirationSeconds: getInt64Point(2000),
-			BoundObjectRef: &authenticationv1.BoundObjectReference{
-				Kind: "pod",
-				Name: "foo-pod",
-				UID:  "foo-uid",
-			},
-		},
-	}
-}
-
-func getInt64Point(v int64) *int64 {
-	return &v
-}
diff --git a/pkg/k8s/token_test.go b/pkg/k8s/token_test.go
deleted file mode 100644
index 68f2444cb..000000000
--- a/pkg/k8s/token_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package k8s
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-	authenticationv1 "k8s.io/api/authentication/v1"
-	storagev1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	fakeclient "k8s.io/client-go/kubernetes/fake"
-	clitesting "k8s.io/client-go/testing"
-	"k8s.io/utils/pointer"
-)
-
-var (
-	testDriver    = "test-driver"
-	testAccount   = "test-service-account"
-	testPod       = "test-pod"
-	testNamespace = "test-ns"
-	testUID       = "test-uid"
-)
-
-func TestPodServiceAccountTokenAttrs(t *testing.T) {
-	scheme := runtime.NewScheme()
-	audience := "aud"
-
-	tests := []struct {
-		desc                         string
-		driver                       *storagev1.CSIDriver
-		wantServiceAccountTokenAttrs map[string]string
-	}{
-		{
-			desc: "csi driver has no ServiceAccountToken",
-			driver: &storagev1.CSIDriver{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: testDriver,
-				},
-				Spec: storagev1.CSIDriverSpec{},
-			},
-			wantServiceAccountTokenAttrs: nil,
-		},
-		{
-			desc: "one token with empty string as audience",
-			driver: &storagev1.CSIDriver{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: testDriver,
-				},
-				Spec: storagev1.CSIDriverSpec{
-					TokenRequests: []storagev1.TokenRequest{
-						{
-							Audience: "",
-						},
-					},
-				},
-			},
-			wantServiceAccountTokenAttrs: map[string]string{"csi.storage.k8s.io/serviceAccount.tokens": `{"":{"token":"test-ns:test-service-account:3600:[api]","expirationTimestamp":"1970-01-01T00:00:01Z"}}`},
-		},
-		{
-			desc: "one token with non-empty string as audience",
-			driver: &storagev1.CSIDriver{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: testDriver,
-				},
-				Spec: storagev1.CSIDriverSpec{
-					TokenRequests: []storagev1.TokenRequest{
-						{
-							Audience: audience,
-						},
-					},
-				},
-			},
-			wantServiceAccountTokenAttrs: map[string]string{"csi.storage.k8s.io/serviceAccount.tokens": `{"aud":{"token":"test-ns:test-service-account:3600:[aud]","expirationTimestamp":"1970-01-01T00:00:01Z"}}`},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.desc, func(t *testing.T) {
-			client := fakeclient.NewSimpleClientset()
-			if test.driver != nil {
-				test.driver.Spec.VolumeLifecycleModes = []storagev1.VolumeLifecycleMode{
-					storagev1.VolumeLifecycleEphemeral,
-				}
-				scheme.Default(test.driver)
-				client = fakeclient.NewSimpleClientset(test.driver)
-			}
-			client.PrependReactor("create", "serviceaccounts", clitesting.ReactionFunc(func(action clitesting.Action) (bool, runtime.Object, error) {
-				tr := action.(clitesting.CreateAction).GetObject().(*authenticationv1.TokenRequest)
-				scheme.Default(tr)
-				if len(tr.Spec.Audiences) == 0 {
-					tr.Spec.Audiences = []string{"api"}
-				}
-				if tr.Spec.ExpirationSeconds == nil {
-					tr.Spec.ExpirationSeconds = pointer.Int64(3600)
-				}
-				tr.Status.Token = fmt.Sprintf("%v:%v:%d:%v", action.GetNamespace(), testAccount, *tr.Spec.ExpirationSeconds, tr.Spec.Audiences)
-				tr.Status.ExpirationTimestamp = metav1.NewTime(time.Unix(1, 1))
-				return true, tr, nil
-			}))
-
-			tokenClient := NewTokenClient(client, testDriver, 1*time.Second)
-			_ = tokenClient.Run(wait.NeverStop)
-			waitForInformerCacheSync()
-
-			attrs, _ := tokenClient.PodServiceAccountTokenAttrs(testNamespace, testPod, testAccount, types.UID(testUID))
-			if diff := cmp.Diff(test.wantServiceAccountTokenAttrs, attrs); diff != "" {
-				t.Errorf("PodServiceAccountTokenAttrs() returned diff (-want +got):\n%s", diff)
-			}
-		})
-	}
-}
diff --git a/pkg/rotation/reconciler.go b/pkg/rotation/reconciler.go
deleted file mode 100644
index e82d12f79..000000000
--- a/pkg/rotation/reconciler.go
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rotation
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-
-	secretsstorev1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
-	"sigs.k8s.io/secrets-store-csi-driver/controllers"
-	secretsStoreClient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned"
-	internalerrors "sigs.k8s.io/secrets-store-csi-driver/pkg/errors"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
-	secretsstore "sigs.k8s.io/secrets-store-csi-driver/pkg/secrets-store"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/fileutil"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/k8sutil"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/secretutil"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/spcpsutil"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/version"
-
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/strategicpatch"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"
-	"monis.app/mlog"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-const (
-	maxNumOfRequeues int = 5
-
-	mountRotationFailedReason       = "MountRotationFailed"
-	mountRotationCompleteReason     = "MountRotationComplete"
-	k8sSecretRotationFailedReason   = "SecretRotationFailed"
-	k8sSecretRotationCompleteReason = "SecretRotationComplete"
-)
-
-// Reconciler reconciles and rotates contents in the pod
-// and Kubernetes secrets periodically
-type Reconciler struct {
-	rotationPollInterval time.Duration
-	providerClients      *secretsstore.PluginClientBuilder
-	queue                workqueue.RateLimitingInterface
-	reporter             StatsReporter
-	eventRecorder        record.EventRecorder
-	kubeClient           kubernetes.Interface
-	crdClient            secretsStoreClient.Interface
-	// cache contains v1.Pod, secretsstorev1.SecretProviderClassPodStatus (both filtered on *nodeID),
-	// v1.Secret (filtered on secrets-store.csi.k8s.io/managed=true)
-	cache client.Reader
-	// secretStore stores Secret (filtered on secrets-store.csi.k8s.io/used=true)
-	secretStore k8s.Store
-	tokenClient *k8s.TokenClient
-
-	driverName string
-}
-
-// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
-// These permissions are required for secret rotation + nodePublishSecretRef
-// TODO (aramase) remove this as part of https://github.com/kubernetes-sigs/secrets-store-csi-driver/issues/585
-
-// NewReconciler returns a new reconciler for rotation
-func NewReconciler(driverName string,
-	client client.Reader,
-	s *runtime.Scheme,
-	rotationPollInterval time.Duration,
-	providerClients *secretsstore.PluginClientBuilder,
-	tokenClient *k8s.TokenClient) (*Reconciler, error) {
-	config, err := buildConfig()
-	if err != nil {
-		return nil, err
-	}
-	config.UserAgent = version.GetUserAgent("rotation")
-	kubeClient := kubernetes.NewForConfigOrDie(config)
-	crdClient := secretsStoreClient.NewForConfigOrDie(config)
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
-	recorder := eventBroadcaster.NewRecorder(s, corev1.EventSource{Component: "csi-secrets-store-rotation"})
-	secretStore, err := k8s.New(kubeClient, 5*time.Second)
-	if err != nil {
-		return nil, err
-	}
-	sr, err := newStatsReporter()
-	if err != nil {
-		return nil, err
-	}
-
-	return &Reconciler{
-		rotationPollInterval: rotationPollInterval,
-		providerClients:      providerClients,
-		reporter:             sr,
-		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-		eventRecorder:        recorder,
-		kubeClient:           kubeClient,
-		crdClient:            crdClient,
-		// cache store Pod,
-		cache:       client,
-		secretStore: secretStore,
-		tokenClient: tokenClient,
-
-		driverName: driverName,
-	}, nil
-}
-
-// Run starts the rotation reconciler
-func (r *Reconciler) Run(stopCh <-chan struct{}) {
-	if err := r.runErr(stopCh); err != nil {
-		mlog.Fatal(err)
-	}
-}
-
-func (r *Reconciler) runErr(stopCh <-chan struct{}) error {
-	defer r.queue.ShutDown()
-	klog.InfoS("starting rotation reconciler", "rotationPollInterval", r.rotationPollInterval)
-
-	ticker := time.NewTicker(r.rotationPollInterval)
-	defer ticker.Stop()
-
-	if err := r.secretStore.Run(stopCh); err != nil {
-		klog.ErrorS(err, "failed to run informers for rotation reconciler")
-		return err
-	}
-
-	// TODO (aramase) consider adding more workers to process reconcile concurrently
-	for i := 0; i < 1; i++ {
-		go wait.Until(r.runWorker, time.Second, stopCh)
-	}
-
-	for {
-		select {
-		case <-stopCh:
-			return nil
-		case <-ticker.C:
-			// The spc pod status informer is configured to do a filtered list watch of spc pod statuses
-			// labeled for the same node as the driver. LIST will only return the filtered results.
-			spcPodStatusList := &secretsstorev1.SecretProviderClassPodStatusList{}
-			err := r.cache.List(context.Background(), spcPodStatusList)
-			if err != nil {
-				klog.ErrorS(err, "failed to list secret provider class pod status for node", "controller", "rotation")
-				continue
-			}
-			for i := range spcPodStatusList.Items {
-				key, err := cache.MetaNamespaceKeyFunc(&spcPodStatusList.Items[i])
-				if err == nil {
-					r.queue.Add(key)
-				}
-			}
-		}
-	}
-}
-
-// runWorker runs a thread that process the queue
-func (r *Reconciler) runWorker() {
-	// nolint
-	for r.processNextItem() {
-
-	}
-}
-
-// processNextItem picks the next available item in the queue and triggers reconcile
-func (r *Reconciler) processNextItem() bool {
-	ctx := context.Background()
-	var err error
-
-	key, quit := r.queue.Get()
-	if quit {
-		return false
-	}
-	defer r.queue.Done(key)
-
-	spcps := &secretsstorev1.SecretProviderClassPodStatus{}
-	keyParts := strings.Split(key.(string), "/")
-	if len(keyParts) < 2 {
-		err = fmt.Errorf("key is not in correct format. expected key format is namespace/name")
-	} else {
-		err = r.cache.Get(
-			ctx,
-			client.ObjectKey{
-				Namespace: keyParts[0],
-				Name:      keyParts[1],
-			},
-			spcps,
-		)
-	}
-
-	if err != nil {
-		// set the log level to 5 so we don't spam the logs with spc pod status not found
-		klog.V(5).ErrorS(err, "failed to get spc pod status", "spcps", key.(string), "controller", "rotation")
-		rateLimited := false
-		// If the error is that spc pod status not found in cache, only retry
-		// with a limit instead of infinite retries.
-		// The cache miss could be because of
-		//   1. The pod was deleted and the spc pod status no longer exists
-		//		We limit the requeue to only 5 times. After 5 times if the spc pod status
-		//		is no longer found, then it will be retried in the next reconcile Run if it's
-		//		an intermittent cache population delay.
-		//   2. The spc pod status has not yet been populated in the cache
-		// 		this is highly unlikely as the spc pod status was added to the queue
-		// 		in Run method after the List call from the same informer cache.
-		if apierrors.IsNotFound(err) {
-			rateLimited = true
-		}
-		r.handleError(err, key, rateLimited)
-		return true
-	}
-	klog.V(3).InfoS("reconciler started", "spcps", klog.KObj(spcps), "controller", "rotation")
-	if err = r.reconcile(ctx, spcps); err != nil {
-		klog.ErrorS(err, "failed to reconcile spc for pod", "spc",
-			spcps.Status.SecretProviderClassName, "pod", spcps.Status.PodName, "controller", "rotation")
-	}
-
-	klog.V(3).InfoS("reconciler completed", "spcps", klog.KObj(spcps), "controller", "rotation")
-	r.handleError(err, key, false)
-	return true
-}
-
-//gocyclo:ignore
-func (r *Reconciler) reconcile(ctx context.Context, spcps *secretsstorev1.SecretProviderClassPodStatus) (err error) {
-	begin := time.Now()
-	errorReason := internalerrors.FailedToRotate
-	// requiresUpdate is set to true when the new object versions differ from the current object versions
-	// after the provider mount request is complete
-	var requiresUpdate bool
-	var providerName string
-
-	defer func() {
-		if err != nil {
-			r.reporter.reportRotationErrorCtMetric(ctx, providerName, errorReason, requiresUpdate)
-			return
-		}
-		r.reporter.reportRotationCtMetric(ctx, providerName, requiresUpdate)
-		r.reporter.reportRotationDuration(ctx, time.Since(begin).Seconds())
-	}()
-
-	// get pod from manager's cache
-	pod := &corev1.Pod{}
-	err = r.cache.Get(
-		ctx,
-		client.ObjectKey{
-			Namespace: spcps.Namespace,
-			Name:      spcps.Status.PodName,
-		},
-		pod,
-	)
-	if err != nil {
-		errorReason = internalerrors.PodNotFound
-		return fmt.Errorf("failed to get pod %s/%s, err: %w", spcps.Namespace, spcps.Status.PodName, err)
-	}
-	// skip rotation if the pod is being terminated
-	// or the pod is in succeeded state (for jobs that complete aren't gc yet)
-	// or the pod is in a failed state (all containers get terminated).
-	// the spcps will be gc when the pod is deleted and will not show up in the next rotation cycle
-	if !pod.GetDeletionTimestamp().IsZero() || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
-		klog.V(5).InfoS("pod is being terminated, skipping rotation", "pod", klog.KObj(pod))
-		return nil
-	}
-
-	// get the secret provider class which pod status is referencing from manager's cache
-	spc := &secretsstorev1.SecretProviderClass{}
-	err = r.cache.Get(
-		ctx,
-		client.ObjectKey{
-			Namespace: spcps.Namespace,
-			Name:      spcps.Status.SecretProviderClassName,
-		},
-		spc,
-	)
-	if err != nil {
-		errorReason = internalerrors.SecretProviderClassNotFound
-		return fmt.Errorf("failed to get secret provider class %s/%s, err: %w", spcps.Namespace, spcps.Status.SecretProviderClassName, err)
-	}
-
-	// determine which pod volume this is associated with
-	podVol := k8sutil.SPCVolume(pod, r.driverName, spc.Name)
-	if podVol == nil {
-		errorReason = internalerrors.PodVolumeNotFound
-		return fmt.Errorf("could not find secret provider class pod status volume for pod %s/%s", pod.Namespace, pod.Name)
-	}
-
-	// validate TargetPath
-	if fileutil.GetPodUIDFromTargetPath(spcps.Status.TargetPath) != string(pod.UID) {
-		errorReason = internalerrors.UnexpectedTargetPath
-		return fmt.Errorf("secret provider class pod status(spcps) targetPath did not match pod UID for pod %s/%s", pod.Namespace, pod.Name)
-	}
-	if fileutil.GetVolumeNameFromTargetPath(spcps.Status.TargetPath) != podVol.Name {
-		errorReason = internalerrors.UnexpectedTargetPath
-		return fmt.Errorf("secret provider class pod status(spcps) volume name does not match the volume name in the pod %s/%s", pod.Namespace, pod.Name)
-	}
-
-	parameters := make(map[string]string)
-	if spc.Spec.Parameters != nil {
-		parameters = spc.Spec.Parameters
-	}
-	// Set these parameters to mimic the exact same attributes we get as part of NodePublishVolumeRequest
-	parameters[secretsstore.CSIPodName] = pod.Name
-	parameters[secretsstore.CSIPodNamespace] = pod.Namespace
-	parameters[secretsstore.CSIPodUID] = string(pod.UID)
-	parameters[secretsstore.CSIPodServiceAccountName] = pod.Spec.ServiceAccountName
-	// csi.storage.k8s.io/serviceAccount.tokens is empty for Kubernetes version < 1.20.
-	// For 1.20+, if tokenRequests is set in the CSI driver spec, kubelet will generate
-	// a token for the pod and send it to the CSI driver.
-	// This check is done for backward compatibility to support passing token from driver
-	// to provider irrespective of the Kubernetes version. If the token doesn't exist in the
-	// volume request context, the CSI driver will generate the token for the configured audience
-	// and send it to the provider in the parameters.
-	serviceAccountTokenAttrs, err := r.tokenClient.PodServiceAccountTokenAttrs(pod.Namespace, pod.Name, pod.Spec.ServiceAccountName, pod.UID)
-	if err != nil {
-		return fmt.Errorf("failed to get service account token attrs, err: %w", err)
-	}
-	for k, v := range serviceAccountTokenAttrs {
-		parameters[k] = v
-	}
-
-	paramsJSON, err := json.Marshal(parameters)
-	if err != nil {
-		return fmt.Errorf("failed to marshal parameters, err: %w", err)
-	}
-	permissionJSON, err := json.Marshal(secretsstore.FilePermission)
-	if err != nil {
-		return fmt.Errorf("failed to marshal permission, err: %w", err)
-	}
-
-	// check if the volume pertaining to the current spc is using nodePublishSecretRef for
-	// accessing external secrets store
-	nodePublishSecretRef := podVol.CSI.NodePublishSecretRef
-
-	var secretsJSON []byte
-	nodePublishSecretData := make(map[string]string)
-	// read the Kubernetes secret referenced in NodePublishSecretRef and marshal it
-	// This comprises the secret parameter in the MountRequest to the provider
-	if nodePublishSecretRef != nil {
-		// read secret from the informer cache
-		secret, err := r.secretStore.GetNodePublishSecretRefSecret(nodePublishSecretRef.Name, spcps.Namespace)
-		if err != nil {
-			if apierrors.IsNotFound(err) {
-				klog.ErrorS(err,
-					fmt.Sprintf("nodePublishSecretRef not found. If the secret with name exists in namespace, label the secret by running 'kubectl label secret %s %s=true -n %s", nodePublishSecretRef.Name, controllers.SecretUsedLabel, spcps.Namespace),
-					"name", nodePublishSecretRef.Name, "namespace", spcps.Namespace)
-			}
-			errorReason = internalerrors.NodePublishSecretRefNotFound
-			r.generateEvent(pod, corev1.EventTypeWarning, mountRotationFailedReason, fmt.Sprintf("failed to get node publish secret %s/%s, err: %+v", spcps.Namespace, nodePublishSecretRef.Name, err))
-			return fmt.Errorf("failed to get node publish secret %s/%s, err: %w", spcps.Namespace, nodePublishSecretRef.Name, err)
-		}
-
-		for k, v := range secret.Data {
-			nodePublishSecretData[k] = string(v)
-		}
-	}
-
-	secretsJSON, err = json.Marshal(nodePublishSecretData)
-	if err != nil {
-		r.generateEvent(pod, corev1.EventTypeWarning, mountRotationFailedReason, fmt.Sprintf("failed to marshal node publish secret data, err: %+v", err))
-		return fmt.Errorf("failed to marshal node publish secret data, err: %w", err)
-	}
-
-	// generate a map with the current object versions stored in spc pod status
-	// the old object versions are passed on to the provider as part of the MountRequest.
-	// the provider can use these current object versions to decide if any action is required
-	// and if the objects need to be rotated
-	oldObjectVersions := make(map[string]string)
-	for _, obj := range spcps.Status.Objects {
-		oldObjectVersions[obj.ID] = obj.Version
-	}
-
-	providerName = string(spc.Spec.Provider)
-	providerClient, err := r.providerClients.Get(ctx, providerName)
-	if err != nil {
-		errorReason = internalerrors.FailedToLookupProviderGRPCClient
-		r.generateEvent(pod, corev1.EventTypeWarning, mountRotationFailedReason, fmt.Sprintf("failed to lookup provider client: %q", providerName))
-		return fmt.Errorf("failed to lookup provider client: %q", providerName)
-	}
-	newObjectVersions, errorReason, err := secretsstore.MountContent(ctx, providerClient, string(paramsJSON), string(secretsJSON), spcps.Status.TargetPath, string(permissionJSON), oldObjectVersions)
-	if err != nil {
-		r.generateEvent(pod, corev1.EventTypeWarning, mountRotationFailedReason, fmt.Sprintf("provider mount err: %+v", err))
-		return fmt.Errorf("failed to rotate objects for pod %s/%s, err: %w", spcps.Namespace, spcps.Status.PodName, err)
-	}
-
-	// compare the old object versions and new object versions to check if any of the objects
-	// have been updated by the provider
-	for k, v := range newObjectVersions {
-		version, ok := oldObjectVersions[strings.TrimSpace(k)]
-		if ok && strings.TrimSpace(version) == strings.TrimSpace(v) {
-			continue
-		}
-		requiresUpdate = true
-		break
-	}
-	// if the spc was updated after initial deployment to remove an existing object, then we
-	// need to update the objects list with the current list to reflect only what's in the pod
-	if len(oldObjectVersions) != len(newObjectVersions) {
-		requiresUpdate = true
-	}
-
-	var errs []error
-	// this loop is executed if there is a difference in the current versions cached in
-	// the secret provider class pod status and the new versions returned by the provider.
-	// the diff in versions is populated in the secret provider class pod status and if the
-	// secret provider class contains secret objects, then the corresponding kubernetes secrets
-	// data is updated with the latest versions
-	if requiresUpdate {
-		// generate an event for successful mount update
-		r.generateEvent(pod, corev1.EventTypeNormal, mountRotationCompleteReason, fmt.Sprintf("successfully rotated mounted contents for spc %s/%s", spc.Namespace, spc.Name))
-		klog.InfoS("updating versions in spc pod status", "spcps", klog.KObj(spcps), "controller", "rotation")
-
-		var ov []secretsstorev1.SecretProviderClassObject
-		for k, v := range newObjectVersions {
-			ov = append(ov, secretsstorev1.SecretProviderClassObject{ID: strings.TrimSpace(k), Version: strings.TrimSpace(v)})
-		}
-		spcps.Status.Objects = spcpsutil.OrderSecretProviderClassObjectByID(ov)
-
-		updateFn := func() (bool, error) {
-			err = r.updateSecretProviderClassPodStatus(ctx, spcps)
-			updated := true
-			if err != nil {
-				klog.ErrorS(err, "failed to update latest versions in spc pod status", "spcps", klog.KObj(spcps), "controller", "rotation")
-				updated = false
-			}
-			return updated, nil
-		}
-
-		if err := wait.ExponentialBackoff(wait.Backoff{
-			Steps:    5,
-			Duration: 1 * time.Millisecond,
-			Factor:   1.0,
-			Jitter:   0.1,
-		}, updateFn); err != nil {
-			r.generateEvent(pod, corev1.EventTypeWarning, mountRotationFailedReason, fmt.Sprintf("failed to update versions in spc pod status %s, err: %+v", spc.Name, err))
-			return fmt.Errorf("failed to update spc pod status, err: %w", err)
-		}
-	}
-
-	if len(spc.Spec.SecretObjects) == 0 {
-		klog.InfoS("spc doesn't contain secret objects", "spc", klog.KObj(spc), "pod", klog.KObj(pod), "controller", "rotation")
-		return nil
-	}
-	files, err := fileutil.GetMountedFiles(spcps.Status.TargetPath)
-	if err != nil {
-		r.generateEvent(pod, corev1.EventTypeWarning, k8sSecretRotationFailedReason, fmt.Sprintf("failed to get mounted files, err: %+v", err))
-		return fmt.Errorf("failed to get mounted files, err: %w", err)
-	}
-	for _, secretObj := range spc.Spec.SecretObjects {
-		secretName := strings.TrimSpace(secretObj.SecretName)
-
-		if err = secretutil.ValidateSecretObject(*secretObj); err != nil {
-			r.generateEvent(pod, corev1.EventTypeWarning, k8sSecretRotationFailedReason, fmt.Sprintf("failed validation for secret object in spc %s/%s, err: %+v", spc.Namespace, spc.Name, err))
-			klog.ErrorS(err, "failed validation for secret object in spc", "spc", klog.KObj(spc), "controller", "rotation")
-			errs = append(errs, err)
-			continue
-		}
-
-		secretType := secretutil.GetSecretType(strings.TrimSpace(secretObj.Type))
-		var datamap map[string][]byte
-		if datamap, err = secretutil.GetSecretData(secretObj.Data, secretType, files); err != nil {
-			r.generateEvent(pod, corev1.EventTypeWarning, k8sSecretRotationFailedReason, fmt.Sprintf("failed to get data in spc %s/%s for secret %s, err: %+v", spc.Namespace, spc.Name, secretName, err))
-			klog.ErrorS(err, "failed to get data in spc for secret", "spc", klog.KObj(spc), "secret", klog.ObjectRef{Namespace: spc.Namespace, Name: secretName}, "controller", "rotation")
-			errs = append(errs, err)
-			continue
-		}
-
-		patchFn := func() (bool, error) {
-			// patch secret data with the new contents
-			if err := r.patchSecret(ctx, secretObj.SecretName, spcps.Namespace, datamap); err != nil {
-				// syncSecret.enabled is set to false by default in the helm chart for installing the driver in v0.0.23+
-				// that would result in a forbidden error, so generate a warning that can be helpful for debugging
-				if apierrors.IsForbidden(err) {
-					klog.Warning(controllers.SyncSecretForbiddenWarning)
-				}
-				klog.ErrorS(err, "failed to patch secret data", "secret", klog.ObjectRef{Namespace: spc.Namespace, Name: secretName}, "spc", klog.KObj(spc), "controller", "rotation")
-				return false, nil
-			}
-			return true, nil
-		}
-
-		if err := wait.ExponentialBackoff(wait.Backoff{
-			Steps:    5,
-			Duration: 1 * time.Millisecond,
-			Factor:   1.0,
-			Jitter:   0.1,
-		}, patchFn); err != nil {
-			r.generateEvent(pod, corev1.EventTypeWarning, k8sSecretRotationFailedReason, fmt.Sprintf("failed to patch secret %s with new data, err: %+v", secretName, err))
-			// continue to ensure error in a single secret doesn't block the updates
-			// for all other secret objects defined in SPC
-			continue
-		}
-		r.generateEvent(pod, corev1.EventTypeNormal, k8sSecretRotationCompleteReason, fmt.Sprintf("successfully rotated K8s secret %s", secretName))
-	}
-
-	// for errors with individual secret objects in spc, we continue to the next secret object
-	// to prevent error with one secret from affecting rotation of all other k8s secret
-	// this consolidation of errors within the loop determines if the spc pod status still needs
-	// to be retried at the end of this rotation reconcile loop
-	if len(errs) > 0 {
-		return fmt.Errorf("failed to rotate one or more k8s secrets, err: %+v", errs)
-	}
-
-	return nil
-}
-
-// updateSecretProviderClassPodStatus updates secret provider class pod status
-func (r *Reconciler) updateSecretProviderClassPodStatus(ctx context.Context, spcPodStatus *secretsstorev1.SecretProviderClassPodStatus) error {
-	// update the secret provider class pod status
-	_, err := r.crdClient.SecretsstoreV1().SecretProviderClassPodStatuses(spcPodStatus.Namespace).Update(ctx, spcPodStatus, metav1.UpdateOptions{})
-	return err
-}
-
-// patchSecret patches secret with the new data and returns error if any
-func (r *Reconciler) patchSecret(ctx context.Context, name, namespace string, data map[string][]byte) error {
-	secret := &corev1.Secret{}
-	err := r.cache.Get(
-		ctx,
-		client.ObjectKey{
-			Namespace: namespace,
-			Name:      name,
-		},
-		secret,
-	)
-	// if there is an error getting the secret -
-	// 1. The secret has been deleted due to an external client
-	// 		The secretproviderclasspodstatus controller will recreate the
-	//		secret as part of the reconcile operation. We don't want to duplicate
-	//		the operation in multiple controllers.
-	// 2. An actual error communicating with the API server, then just return
-	if err != nil {
-		return err
-	}
-
-	currentDataSHA, err := secretutil.GetSHAFromSecret(secret.Data)
-	if err != nil {
-		return fmt.Errorf("failed to compute SHA for %s/%s old data, err: %w", namespace, name, err)
-	}
-	newDataSHA, err := secretutil.GetSHAFromSecret(data)
-	if err != nil {
-		return fmt.Errorf("failed to compute SHA for %s/%s new data, err: %w", namespace, name, err)
-	}
-	// if the SHA for the current data and new data match then skip
-	// the redundant API call to patch the same data
-	if currentDataSHA == newDataSHA {
-		return nil
-	}
-
-	newSecret := *secret
-	newSecret.Data = data
-	oldData, err := json.Marshal(secret)
-	if err != nil {
-		return fmt.Errorf("failed to marshal old secret, err: %w", err)
-	}
-	secret.Data = data
-	newData, err := json.Marshal(&newSecret)
-	if err != nil {
-		return fmt.Errorf("failed to marshal new secret, err: %w", err)
-	}
-	// Patching data replaces values for existing data keys
-	// and appends new keys if it doesn't already exist
-	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, secret)
-	if err != nil {
-		return fmt.Errorf("failed to create patch, err: %w", err)
-	}
-	_, err = r.kubeClient.CoreV1().Secrets(namespace).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
-	return err
-}
-
-// handleError requeue the key after 10s if there is an error while processing
-func (r *Reconciler) handleError(err error, key interface{}, rateLimited bool) {
-	if err == nil {
-		r.queue.Forget(key)
-		return
-	}
-	if !rateLimited {
-		r.queue.AddAfter(key, 10*time.Second)
-		return
-	}
-	// if the requeue for key is rate limited and the number of times the key
-	// has been added back to queue exceeds the default allowed limit, then do nothing.
-	// this is done to prevent infinitely adding the key the queue in scenarios where
-	// the key was added to the queue because of an error but has since been deleted.
-	if r.queue.NumRequeues(key) < maxNumOfRequeues {
-		r.queue.AddRateLimited(key)
-		return
-	}
-	klog.InfoS("retry budget exceeded, dropping from queue", "spcps", key)
-	r.queue.Forget(key)
-}
-
-// generateEvent generates an event
-func (r *Reconciler) generateEvent(obj runtime.Object, eventType, reason, message string) {
-	r.eventRecorder.Eventf(obj, eventType, reason, message)
-}
-
-// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
-func buildConfig() (*rest.Config, error) {
-	kubeconfigPath := os.Getenv("KUBECONFIG")
-	if kubeconfigPath != "" {
-		return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
-	}
-
-	return rest.InClusterConfig()
-}
diff --git a/pkg/rotation/reconciler_test.go b/pkg/rotation/reconciler_test.go
deleted file mode 100644
index db79450b7..000000000
--- a/pkg/rotation/reconciler_test.go
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rotation
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	secretsstorev1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
-	"sigs.k8s.io/secrets-store-csi-driver/controllers"
-	secretsStoreFakeClient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/fake"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
-	secretsstore "sigs.k8s.io/secrets-store-csi-driver/pkg/secrets-store"
-	providerfake "sigs.k8s.io/secrets-store-csi-driver/provider/fake"
-
-	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/fake"
-	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/workqueue"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	controllerfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
-)
-
-var (
-	fakeRecorder = record.NewFakeRecorder(20)
-)
-
-func setupScheme() (*runtime.Scheme, error) {
-	scheme := runtime.NewScheme()
-	if err := secretsstorev1.AddToScheme(scheme); err != nil {
-		return nil, err
-	}
-	if err := clientgoscheme.AddToScheme(scheme); err != nil {
-		return nil, err
-	}
-	return scheme, nil
-}
-
-func newTestReconciler(client client.Reader, kubeClient kubernetes.Interface, crdClient *secretsStoreFakeClient.Clientset, rotationPollInterval time.Duration, socketPath string) (*Reconciler, error) {
-	secretStore, err := k8s.New(kubeClient, 5*time.Second)
-	if err != nil {
-		return nil, err
-	}
-	sr, err := newStatsReporter()
-	if err != nil {
-		return nil, err
-	}
-
-	return &Reconciler{
-		rotationPollInterval: rotationPollInterval,
-		providerClients:      secretsstore.NewPluginClientBuilder([]string{socketPath}),
-		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-		reporter:             sr,
-		eventRecorder:        fakeRecorder,
-		kubeClient:           kubeClient,
-		crdClient:            crdClient,
-		cache:                client,
-		secretStore:          secretStore,
-		tokenClient:          k8s.NewTokenClient(kubeClient, "test-driver", 1*time.Second),
-		driverName:           "secrets-store.csi.k8s.io",
-	}, nil
-}
-
-func TestReconcileError(t *testing.T) {
-	g := NewWithT(t)
-
-	tests := []struct {
-		name                                  string
-		rotationPollInterval                  time.Duration
-		secretProviderClassPodStatusToProcess *secretsstorev1.SecretProviderClassPodStatus
-		secretProviderClassToAdd              *secretsstorev1.SecretProviderClass
-		podToAdd                              *corev1.Pod
-		socketPath                            string
-		secretToAdd                           *corev1.Secret
-		expectedObjectVersions                map[string]string
-		expectedErr                           bool
-		expectedErrorEvents                   bool
-	}{
-		{
-			name:                 "secret provider class not found",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{},
-			podToAdd:                 &corev1.Pod{},
-			socketPath:               t.TempDir(),
-			secretToAdd:              &corev1.Secret{},
-			expectedErr:              true,
-		},
-		{
-			name:                 "failed to get pod",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "spc1",
-					Namespace: "default",
-				},
-				Spec: secretsstorev1.SecretProviderClassSpec{
-					SecretObjects: []*secretsstorev1.SecretObject{
-						{
-							Data: []*secretsstorev1.SecretObjectData{
-								{
-									ObjectName: "object1",
-									Key:        "foo",
-								},
-							},
-						},
-					},
-				},
-			},
-			podToAdd:    &corev1.Pod{},
-			socketPath:  t.TempDir(),
-			secretToAdd: &corev1.Secret{},
-			expectedErr: true,
-		},
-		{
-			name:                 "failed to get NodePublishSecretRef secret",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-					TargetPath:              getTestTargetPath(t, "foo", "csi-volume"),
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "spc1",
-					Namespace: "default",
-				},
-				Spec: secretsstorev1.SecretProviderClassSpec{
-					SecretObjects: []*secretsstorev1.SecretObject{
-						{
-							Data: []*secretsstorev1.SecretObjectData{
-								{
-									ObjectName: "object1",
-									Key:        "foo",
-								},
-							},
-						},
-					},
-					Provider: "provider1",
-				},
-			},
-			podToAdd: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1",
-					Namespace: "default",
-					UID:       types.UID("foo"),
-				},
-				Spec: corev1.PodSpec{
-					Volumes: []corev1.Volume{
-						{
-							Name: "csi-volume",
-							VolumeSource: corev1.VolumeSource{
-								CSI: &corev1.CSIVolumeSource{
-									Driver:           "secrets-store.csi.k8s.io",
-									VolumeAttributes: map[string]string{"secretProviderClass": "spc1"},
-									NodePublishSecretRef: &corev1.LocalObjectReference{
-										Name: "secret1",
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			socketPath:          t.TempDir(),
-			secretToAdd:         &corev1.Secret{},
-			expectedErr:         true,
-			expectedErrorEvents: true,
-		},
-		{
-			name:                 "failed to validate targetpath UID",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-					TargetPath:              getTestTargetPath(t, "bad-uid", "csi-volume"),
-					Objects: []secretsstorev1.SecretProviderClassObject{
-						{
-							ID:      "secret/object1",
-							Version: "v1",
-						},
-					},
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "spc1",
-					Namespace: "default",
-				},
-				Spec: secretsstorev1.SecretProviderClassSpec{
-					SecretObjects: []*secretsstorev1.SecretObject{
-						{
-							Data: []*secretsstorev1.SecretObjectData{
-								{
-									ObjectName: "object1",
-									Key:        "foo",
-								},
-							},
-						},
-					},
-					Provider: "provider1",
-				},
-			},
-			podToAdd: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1",
-					Namespace: "default",
-					UID:       types.UID("foo"),
-				},
-				Spec: corev1.PodSpec{
-					Volumes: []corev1.Volume{
-						{
-							Name: "csi-volume",
-							VolumeSource: corev1.VolumeSource{
-								CSI: &corev1.CSIVolumeSource{
-									Driver:           "secrets-store.csi.k8s.io",
-									VolumeAttributes: map[string]string{"secretProviderClass": "spc1"},
-								},
-							},
-						},
-					},
-				},
-			},
-			socketPath: t.TempDir(),
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            "object1",
-					Namespace:       "default",
-					ResourceVersion: "rv1",
-				},
-				Data: map[string][]byte{"foo": []byte("olddata")},
-			},
-			expectedObjectVersions: map[string]string{"secret/object1": "v2"},
-			expectedErr:            true,
-			expectedErrorEvents:    false,
-		},
-		{
-			name:                 "failed to validate targetpath volume name",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-					TargetPath:              getTestTargetPath(t, "foo", "bad-volume-name"),
-					Objects: []secretsstorev1.SecretProviderClassObject{
-						{
-							ID:      "secret/object1",
-							Version: "v1",
-						},
-					},
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "spc1",
-					Namespace: "default",
-				},
-				Spec: secretsstorev1.SecretProviderClassSpec{
-					SecretObjects: []*secretsstorev1.SecretObject{
-						{
-							Data: []*secretsstorev1.SecretObjectData{
-								{
-									ObjectName: "object1",
-									Key:        "foo",
-								},
-							},
-						},
-					},
-					Provider: "provider1",
-				},
-			},
-			podToAdd: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1",
-					Namespace: "default",
-					UID:       types.UID("foo"),
-				},
-				Spec: corev1.PodSpec{
-					Volumes: []corev1.Volume{
-						{
-							Name: "csi-volume",
-							VolumeSource: corev1.VolumeSource{
-								CSI: &corev1.CSIVolumeSource{
-									Driver:           "secrets-store.csi.k8s.io",
-									VolumeAttributes: map[string]string{"secretProviderClass": "spc1"},
-								},
-							},
-						},
-					},
-				},
-			},
-			socketPath: t.TempDir(),
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            "object1",
-					Namespace:       "default",
-					ResourceVersion: "rv1",
-				},
-				Data: map[string][]byte{"foo": []byte("olddata")},
-			},
-			expectedObjectVersions: map[string]string{"secret/object1": "v2"},
-			expectedErr:            true,
-			expectedErrorEvents:    false,
-		},
-		{
-			name:                 "failed to lookup provider client",
-			rotationPollInterval: 60 * time.Second,
-			secretProviderClassPodStatusToProcess: &secretsstorev1.SecretProviderClassPodStatus{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1-default-spc1",
-					Namespace: "default",
-					Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-				},
-				Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-					SecretProviderClassName: "spc1",
-					PodName:                 "pod1",
-					TargetPath:              getTestTargetPath(t, "foo", "csi-volume"),
-				},
-			},
-			secretProviderClassToAdd: &secretsstorev1.SecretProviderClass{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "spc1",
-					Namespace: "default",
-				},
-				Spec: secretsstorev1.SecretProviderClassSpec{
-					SecretObjects: []*secretsstorev1.SecretObject{
-						{
-							Data: []*secretsstorev1.SecretObjectData{
-								{
-									ObjectName: "object1",
-									Key:        "foo",
-								},
-							},
-						},
-					},
-					Provider: "wrongprovider",
-				},
-			},
-			podToAdd: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pod1",
-					Namespace: "default",
-					UID:       types.UID("foo"),
-				},
-				Spec: corev1.PodSpec{
-					Volumes: []corev1.Volume{
-						{
-							Name: "csi-volume",
-							VolumeSource: corev1.VolumeSource{
-								CSI: &corev1.CSIVolumeSource{
-									Driver:           "secrets-store.csi.k8s.io",
-									VolumeAttributes: map[string]string{"secretProviderClass": "spc1"},
-									NodePublishSecretRef: &corev1.LocalObjectReference{
-										Name: "secret1",
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			socketPath: t.TempDir(),
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "secret1",
-					Namespace: "default",
-				},
-				Data: map[string][]byte{"clientid": []byte("clientid")},
-			},
-			expectedErr:         true,
-			expectedErrorEvents: true,
-		},
-	}
-
-	scheme, err := setupScheme()
-	g.Expect(err).NotTo(HaveOccurred())
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			kubeClient := fake.NewSimpleClientset(test.podToAdd, test.secretToAdd)
-			crdClient := secretsStoreFakeClient.NewSimpleClientset(test.secretProviderClassPodStatusToProcess, test.secretProviderClassToAdd)
-
-			initObjects := []client.Object{
-				test.podToAdd,
-				test.secretToAdd,
-				test.secretProviderClassPodStatusToProcess,
-				test.secretProviderClassToAdd,
-			}
-			client := controllerfake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-
-			testReconciler, err := newTestReconciler(client, kubeClient, crdClient, test.rotationPollInterval, test.socketPath)
-			g.Expect(err).NotTo(HaveOccurred())
-
-			err = testReconciler.secretStore.Run(wait.NeverStop)
-			g.Expect(err).NotTo(HaveOccurred())
-
-			serverEndpoint := fmt.Sprintf("%s/%s.sock", test.socketPath, "provider1")
-			defer os.Remove(serverEndpoint)
-
-			server, err := providerfake.NewMocKCSIProviderServer(serverEndpoint)
-			g.Expect(err).NotTo(HaveOccurred())
-			server.SetObjects(test.expectedObjectVersions)
-			err = server.Start()
-			g.Expect(err).NotTo(HaveOccurred())
-
-			err = testReconciler.reconcile(context.TODO(), test.secretProviderClassPodStatusToProcess)
-			g.Expect(err).To(HaveOccurred())
-			if test.expectedErrorEvents {
-				g.Expect(len(fakeRecorder.Events)).ToNot(BeNumerically("==", 0))
-				for len(fakeRecorder.Events) > 0 {
-					fmt.Println(<-fakeRecorder.Events)
-				}
-			}
-		})
-	}
-}
-
-func TestReconcileNoError(t *testing.T) {
-	g := NewWithT(t)
-
-	tests := []struct {
-		name                            string
-		nodePublishSecretRefSecretToAdd *corev1.Secret
-	}{
-		{
-			name: "filtered watch for nodePublishSecretRef",
-			nodePublishSecretRefSecretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "secret1",
-					Namespace: "default",
-					Labels: map[string]string{
-						controllers.SecretUsedLabel: "true",
-					},
-				},
-				Data: map[string][]byte{"clientid": []byte("clientid")},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		secretProviderClassPodStatusToProcess := &secretsstorev1.SecretProviderClassPodStatus{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "pod1-default-spc1",
-				Namespace: "default",
-				Labels:    map[string]string{secretsstorev1.InternalNodeLabel: "nodeName"},
-			},
-			Status: secretsstorev1.SecretProviderClassPodStatusStatus{
-				SecretProviderClassName: "spc1",
-				PodName:                 "pod1",
-				TargetPath:              getTestTargetPath(t, "foo", "csi-volume"),
-				Objects: []secretsstorev1.SecretProviderClassObject{
-					{
-						ID:      "secret/object1",
-						Version: "v1",
-					},
-				},
-			},
-		}
-		secretProviderClassToAdd := &secretsstorev1.SecretProviderClass{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "spc1",
-				Namespace: "default",
-			},
-			Spec: secretsstorev1.SecretProviderClassSpec{
-				SecretObjects: []*secretsstorev1.SecretObject{
-					{
-						Data: []*secretsstorev1.SecretObjectData{
-							{
-								ObjectName: "object1",
-								Key:        "foo",
-							},
-						},
-						SecretName: "foosecret",
-						Type:       "Opaque",
-					},
-				},
-				Provider: "provider1",
-			},
-		}
-		podToAdd := &corev1.Pod{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "pod1",
-				Namespace: "default",
-				UID:       types.UID("foo"),
-			},
-			Spec: corev1.PodSpec{
-				Volumes: []corev1.Volume{
-					{
-						Name: "csi-volume",
-						VolumeSource: corev1.VolumeSource{
-							CSI: &corev1.CSIVolumeSource{
-								Driver:           "secrets-store.csi.k8s.io",
-								VolumeAttributes: map[string]string{"secretProviderClass": "spc1"},
-								NodePublishSecretRef: &corev1.LocalObjectReference{
-									Name: "secret1",
-								},
-							},
-						},
-					},
-				},
-			},
-		}
-		secretToBeRotated := &corev1.Secret{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:            "foosecret",
-				Namespace:       "default",
-				ResourceVersion: "12352",
-				Labels: map[string]string{
-					controllers.SecretManagedLabel: "true",
-				},
-			},
-			Data: map[string][]byte{"foo": []byte("olddata")},
-		}
-
-		socketPath := t.TempDir()
-		expectedObjectVersions := map[string]string{"secret/object1": "v2"}
-		scheme, err := setupScheme()
-		g.Expect(err).NotTo(HaveOccurred())
-
-		kubeClient := fake.NewSimpleClientset(podToAdd, test.nodePublishSecretRefSecretToAdd, secretToBeRotated)
-		crdClient := secretsStoreFakeClient.NewSimpleClientset(secretProviderClassPodStatusToProcess, secretProviderClassToAdd)
-
-		initObjects := []client.Object{
-			podToAdd,
-			secretToBeRotated,
-			test.nodePublishSecretRefSecretToAdd,
-			secretProviderClassPodStatusToProcess,
-			secretProviderClassToAdd,
-		}
-		ctrlClient := controllerfake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-
-		testReconciler, err := newTestReconciler(ctrlClient, kubeClient, crdClient, 60*time.Second, socketPath)
-		g.Expect(err).NotTo(HaveOccurred())
-		err = testReconciler.secretStore.Run(wait.NeverStop)
-		g.Expect(err).NotTo(HaveOccurred())
-
-		serverEndpoint := fmt.Sprintf("%s/%s.sock", socketPath, "provider1")
-		defer os.Remove(serverEndpoint)
-
-		server, err := providerfake.NewMocKCSIProviderServer(serverEndpoint)
-		g.Expect(err).NotTo(HaveOccurred())
-		server.SetObjects(expectedObjectVersions)
-		err = server.Start()
-		g.Expect(err).NotTo(HaveOccurred())
-
-		err = os.WriteFile(secretProviderClassPodStatusToProcess.Status.TargetPath+"/object1", []byte("newdata"), secretsstore.FilePermission)
-		g.Expect(err).NotTo(HaveOccurred())
-
-		err = testReconciler.reconcile(context.TODO(), secretProviderClassPodStatusToProcess)
-		g.Expect(err).NotTo(HaveOccurred())
-
-		// validate the secret provider class pod status versions have been updated
-		updatedSPCPodStatus, err := crdClient.SecretsstoreV1().SecretProviderClassPodStatuses(corev1.NamespaceDefault).Get(context.TODO(), "pod1-default-spc1", metav1.GetOptions{})
-		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(updatedSPCPodStatus.Status.Objects).To(Equal([]secretsstorev1.SecretProviderClassObject{{ID: "secret/object1", Version: "v2"}}))
-
-		// validate the secret data has been updated to the latest value
-		updatedSecret, err := kubeClient.CoreV1().Secrets(corev1.NamespaceDefault).Get(context.TODO(), "foosecret", metav1.GetOptions{})
-		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(updatedSecret.Data["foo"]).To(Equal([]byte("newdata")))
-
-		// 2 normal events - one for successfully updating the mounted contents and
-		// second for successfully rotating the K8s secret
-		g.Expect(len(fakeRecorder.Events)).To(BeNumerically("==", 2))
-		for len(fakeRecorder.Events) > 0 {
-			<-fakeRecorder.Events
-		}
-
-		// test with pod being terminated
-		podToAdd.DeletionTimestamp = &metav1.Time{Time: time.Now()}
-		kubeClient = fake.NewSimpleClientset(podToAdd, test.nodePublishSecretRefSecretToAdd)
-		initObjects = []client.Object{
-			podToAdd,
-			test.nodePublishSecretRefSecretToAdd,
-		}
-		ctrlClient = controllerfake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-		testReconciler, err = newTestReconciler(ctrlClient, kubeClient, crdClient, 60*time.Second, socketPath)
-		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(err).NotTo(HaveOccurred())
-
-		err = testReconciler.reconcile(context.TODO(), secretProviderClassPodStatusToProcess)
-		g.Expect(err).NotTo(HaveOccurred())
-
-		// test with pod being in succeeded phase
-		podToAdd.DeletionTimestamp = nil
-		podToAdd.Status.Phase = corev1.PodSucceeded
-		kubeClient = fake.NewSimpleClientset(podToAdd, test.nodePublishSecretRefSecretToAdd)
-		initObjects = []client.Object{
-			podToAdd,
-			test.nodePublishSecretRefSecretToAdd,
-		}
-		ctrlClient = controllerfake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-		testReconciler, err = newTestReconciler(ctrlClient, kubeClient, crdClient, 60*time.Second, socketPath)
-		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(err).NotTo(HaveOccurred())
-
-		err = testReconciler.reconcile(context.TODO(), secretProviderClassPodStatusToProcess)
-		g.Expect(err).NotTo(HaveOccurred())
-	}
-}
-
-func TestPatchSecret(t *testing.T) {
-	g := NewWithT(t)
-
-	tests := []struct {
-		name               string
-		secretToAdd        *corev1.Secret
-		secretName         string
-		expectedSecretData map[string][]byte
-		expectedErr        bool
-	}{
-		{
-			name:        "secret is not found",
-			secretToAdd: &corev1.Secret{},
-			secretName:  "secret1",
-			expectedErr: true,
-		},
-		{
-			name: "secret is found and data already matches",
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            "secret1",
-					Namespace:       "default",
-					ResourceVersion: "16172",
-					Labels: map[string]string{
-						controllers.SecretManagedLabel: "true",
-					},
-				},
-				Data: map[string][]byte{"key1": []byte("value1")},
-			},
-			secretName:         "secret1",
-			expectedSecretData: map[string][]byte{"key1": []byte("value1")},
-			expectedErr:        false,
-		},
-		{
-			name: "secret is found and data is updated to latest",
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            "secret1",
-					Namespace:       "default",
-					ResourceVersion: "16172",
-					Labels: map[string]string{
-						controllers.SecretManagedLabel: "true",
-					},
-				},
-				Data: map[string][]byte{"key1": []byte("value1")},
-			},
-			secretName:         "secret1",
-			expectedSecretData: map[string][]byte{"key2": []byte("value2")},
-			expectedErr:        false,
-		},
-		{
-			name: "secret is found and new data is appended to existing",
-			secretToAdd: &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            "secret1",
-					Namespace:       "default",
-					ResourceVersion: "16172",
-					Labels: map[string]string{
-						controllers.SecretManagedLabel: "true",
-					},
-				},
-				Data: map[string][]byte{"key1": []byte("value1")},
-			},
-			secretName:         "secret1",
-			expectedSecretData: map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")},
-			expectedErr:        false,
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			scheme, err := setupScheme()
-			g.Expect(err).NotTo(HaveOccurred())
-
-			kubeClient := fake.NewSimpleClientset(test.secretToAdd)
-			crdClient := secretsStoreFakeClient.NewSimpleClientset()
-
-			initObjects := []client.Object{
-				test.secretToAdd,
-			}
-			ctrlClient := controllerfake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjects...).Build()
-
-			testReconciler, err := newTestReconciler(ctrlClient, kubeClient, crdClient, 60*time.Second, "")
-			g.Expect(err).NotTo(HaveOccurred())
-			err = testReconciler.secretStore.Run(wait.NeverStop)
-			g.Expect(err).NotTo(HaveOccurred())
-
-			err = testReconciler.patchSecret(context.TODO(), test.secretName, corev1.NamespaceDefault, test.expectedSecretData)
-			if test.expectedErr {
-				g.Expect(err).To(HaveOccurred())
-			} else {
-				g.Expect(err).NotTo(HaveOccurred())
-			}
-
-			if !test.expectedErr {
-				// check the secret data is what we expect it to
-				secret, err := kubeClient.CoreV1().Secrets(corev1.NamespaceDefault).Get(context.TODO(), test.secretName, metav1.GetOptions{})
-				g.Expect(err).NotTo(HaveOccurred())
-				g.Expect(secret.Data).To(Equal(test.expectedSecretData))
-			}
-		})
-	}
-}
-
-func TestHandleError(t *testing.T) {
-	g := NewWithT(t)
-
-	testReconciler, err := newTestReconciler(nil, nil, nil, 60*time.Second, "")
-	g.Expect(err).NotTo(HaveOccurred())
-
-	testReconciler.handleError(errors.New("failed error"), "key1", false)
-	// wait for the object to be requeued
-	time.Sleep(11 * time.Second)
-	g.Expect(testReconciler.queue.Len()).To(Equal(1))
-
-	for i := 0; i < 5; i++ {
-		time.Sleep(1 * time.Second)
-		testReconciler.handleError(errors.New("failed error"), "key1", true)
-		g.Expect(testReconciler.queue.NumRequeues("key1")).To(Equal(i + 1))
-
-		testReconciler.queue.Get()
-		testReconciler.queue.Done("key1")
-	}
-
-	// max number of requeues complete for key2, so now it should be removed from queue
-	testReconciler.handleError(errors.New("failed error"), "key1", true)
-	time.Sleep(1 * time.Second)
-	g.Expect(testReconciler.queue.Len()).To(Equal(1))
-}
-
-func getTestTargetPath(t *testing.T, uid, vol string) string {
-	path := filepath.Join(t.TempDir(), "pods", uid, "volumes", "kubernetes.io~csi", vol, "mount")
-	if err := os.MkdirAll(path, 0755); err != nil {
-		t.Fatalf("expected err to be nil, got: %+v", err)
-	}
-	return path
-}
diff --git a/pkg/rotation/stats_reporter.go b/pkg/rotation/stats_reporter.go
deleted file mode 100644
index bc3fa8066..000000000
--- a/pkg/rotation/stats_reporter.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rotation
-
-import (
-	"context"
-	"runtime"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/metric"
-	"go.opentelemetry.io/otel/metric/global"
-)
-
-const (
-	scope = "sigs.k8s.io/secrets-store-csi-driver"
-)
-
-var (
-	providerKey = "provider"
-	errorKey    = "error_type"
-	osTypeKey   = "os_type"
-	rotatedKey  = "rotated"
-	runtimeOS   = runtime.GOOS
-)
-
-type reporter struct {
-	rotationReconcileTotal      metric.Int64Counter
-	rotationReconcileErrorTotal metric.Int64Counter
-	rotationReconcileDuration   metric.Float64Histogram
-}
-
-type StatsReporter interface {
-	reportRotationCtMetric(ctx context.Context, provider string, wasRotated bool)
-	reportRotationErrorCtMetric(ctx context.Context, provider, errType string, wasRotated bool)
-	reportRotationDuration(ctx context.Context, duration float64)
-}
-
-func newStatsReporter() (StatsReporter, error) {
-	var err error
-
-	r := &reporter{}
-	meter := global.Meter(scope)
-
-	if r.rotationReconcileTotal, err = meter.Int64Counter("rotation_reconcile", metric.WithDescription("Total number of rotation reconciles")); err != nil {
-		return nil, err
-	}
-	if r.rotationReconcileErrorTotal, err = meter.Int64Counter("rotation_reconcile_error", metric.WithDescription("Total number of rotation reconciles with error")); err != nil {
-		return nil, err
-	}
-	if r.rotationReconcileDuration, err = meter.Float64Histogram("rotation_reconcile_duration_sec", metric.WithDescription("Distribution of how long it took to rotate secrets-store content for pods")); err != nil {
-		return nil, err
-	}
-	return r, nil
-}
-
-func (r *reporter) reportRotationCtMetric(ctx context.Context, provider string, wasRotated bool) {
-	opt := metric.WithAttributes(
-		attribute.Key(providerKey).String(provider),
-		attribute.Key(osTypeKey).String(runtimeOS),
-		attribute.Key(rotatedKey).Bool(wasRotated),
-	)
-	r.rotationReconcileTotal.Add(ctx, 1, opt)
-}
-
-func (r *reporter) reportRotationErrorCtMetric(ctx context.Context, provider, errType string, wasRotated bool) {
-	opt := metric.WithAttributes(
-		attribute.Key(providerKey).String(provider),
-		attribute.Key(errorKey).String(errType),
-		attribute.Key(osTypeKey).String(runtimeOS),
-		attribute.Key(rotatedKey).Bool(wasRotated),
-	)
-	r.rotationReconcileErrorTotal.Add(ctx, 1, opt)
-}
-
-func (r *reporter) reportRotationDuration(ctx context.Context, duration float64) {
-	opt := metric.WithAttributes(
-		attribute.Key(osTypeKey).String(runtimeOS),
-	)
-	r.rotationReconcileDuration.Record(ctx, duration, opt)
-}
diff --git a/pkg/secrets-store/nodeserver.go b/pkg/secrets-store/nodeserver.go
index e2af8dcd0..ecbd91a43 100644
--- a/pkg/secrets-store/nodeserver.go
+++ b/pkg/secrets-store/nodeserver.go
@@ -26,13 +26,10 @@ import (
 	"time"
 
 	internalerrors "sigs.k8s.io/secrets-store-csi-driver/pkg/errors"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/fileutil"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	mount "k8s.io/mount-utils"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -47,7 +44,7 @@ type nodeServer struct {
 	// This should be used sparingly and only when the client does not fit the use case.
 	reader          client.Reader
 	providerClients *PluginClientBuilder
-	tokenClient     *k8s.TokenClient
+	rotationConfig  *rotationConfig
 }
 
 const (
@@ -73,17 +70,19 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	startTime := time.Now()
 	var parameters map[string]string
 	var providerName string
-	var podName, podNamespace, podUID, serviceAccountName string
+	var podName, podNamespace, podUID string
 	var targetPath string
 	var mounted bool
+	var isRemountRequest bool
 	errorReason := internalerrors.FailedToMount
+	rotationEnabled := ns.rotationConfig.enabled
 
 	defer func() {
 		if err != nil {
 			// if there is an error at any stage during node publish volume and if the path
 			// has already been mounted, unmount the target path so the next time kubelet calls
 			// again for mount, entire node publish volume is retried
-			if targetPath != "" && mounted {
+			if targetPath != "" && mounted && !isRemountRequest {
 				klog.InfoS("unmounting target path as node publish volume failed", "targetPath", targetPath, "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName})
 				if unmountErr := ns.mounter.Unmount(targetPath); unmountErr != nil {
 					klog.ErrorS(unmountErr, "failed to unmounting target path")
@@ -120,7 +119,16 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	podName = attrib[CSIPodName]
 	podNamespace = attrib[CSIPodNamespace]
 	podUID = attrib[CSIPodUID]
-	serviceAccountName = attrib[CSIPodServiceAccountName]
+
+	if ns.rotationConfig.enabled {
+		lastModificationTime, err := ns.getLastUpdateTime(targetPath)
+		if err != nil {
+			klog.InfoS("could not find last modification time for targetpath", "targetPath", targetPath, "error", err)
+		} else if startTime.Before(lastModificationTime.Add(ns.rotationConfig.rotationPollInterval)) {
+			// if next rotation is not yet due, then skip the mount operation
+			return &csi.NodePublishVolumeResponse{}, nil
+		}
+	}
 
 	mounted, err = ns.ensureMountPoint(targetPath)
 	if err != nil {
@@ -135,7 +143,11 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 			return nil, status.Errorf(codes.Internal, "failed to check if target path %s is mount point, err: %v", targetPath, err)
 		}
 	}
-	if mounted {
+	// If it is mounted, it means this is not the first time mount request for this path.
+	isRemountRequest = mounted
+
+	// If rotation is not enabled, don't remount the already mounted secrets.
+	if !rotationEnabled && mounted {
 		klog.InfoS("target path is already mounted", "targetPath", targetPath, "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName})
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
@@ -145,9 +157,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	if isMockProvider(providerName) {
 		// mock provider is used only for running sanity tests against the driver
 
-		// TODO: until requiresRemount (#585) is supported, "mounted" will always be false
-		// and this code will always be called
-		if !mounted {
+		if !rotationEnabled && !mounted {
 			err := ns.mounter.Mount("tmpfs", targetPath, "tmpfs", []string{})
 
 			if err != nil {
@@ -190,14 +200,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	// and send it to the provider in the parameters.
 	if parameters[CSIPodServiceAccountTokens] == "" {
 		// Inject pod service account token into volume attributes
-		serviceAccountTokenAttrs, err := ns.tokenClient.PodServiceAccountTokenAttrs(podNamespace, podName, serviceAccountName, types.UID(podUID))
-		if err != nil {
-			klog.ErrorS(err, "failed to get service account token attrs", "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName})
-			return nil, err
-		}
-		for k, v := range serviceAccountTokenAttrs {
-			parameters[k] = v
-		}
+		klog.ErrorS(nil, "csi.storage.k8s.io/serviceAccount.tokens is not populated, set RequiresRepublish")
 	}
 
 	// ensure it's read-only
@@ -238,7 +241,11 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	mounted = true
 	var objectVersions map[string]string
 	if objectVersions, errorReason, err = ns.mountSecretsStoreObjectContent(ctx, providerName, string(parametersStr), string(secretStr), targetPath, string(permissionStr), podName); err != nil {
-		klog.ErrorS(err, "failed to mount secrets store object content", "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName})
+		klog.ErrorS(err, "failed to mount secrets store object content", "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName}, "isRemountRequest", isRemountRequest)
+		if isRemountRequest {
+			// Mask error until fix available for https://github.com/kubernetes/kubernetes/issues/121271
+			return &csi.NodePublishVolumeResponse{}, nil
+		}
 		return nil, fmt.Errorf("failed to mount secrets store objects for pod %s/%s, err: %w", podNamespace, podName, err)
 	}
 
@@ -247,6 +254,11 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	// the pod with same name (pods created by statefulsets) is moved to a different node and the old SPCPS
 	// has not yet been garbage collected.
 	if err = createOrUpdateSecretProviderClassPodStatus(ctx, ns.client, ns.reader, podName, podNamespace, podUID, secretProviderClass, targetPath, ns.nodeID, true, objectVersions); err != nil {
+		klog.ErrorS(err, "failed to create/update spcps", "pod", klog.ObjectRef{Namespace: podNamespace, Name: podName}, "isRemountRequest", isRemountRequest)
+		if isRemountRequest {
+			// Mask error until fix available for https://github.com/kubernetes/kubernetes/issues/121271
+			return &csi.NodePublishVolumeResponse{}, nil
+		}
 		return nil, fmt.Errorf("failed to create secret provider class pod status for pod %s/%s, err: %w", podNamespace, podName, err)
 	}
 
@@ -296,13 +308,6 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	podUID := fileutil.GetPodUIDFromTargetPath(targetPath)
-	if podUID != "" {
-		// delete service account token from cache as the pod is deleted
-		// to ensure the cache isn't growing indefinitely
-		ns.tokenClient.DeleteServiceAccountToken(types.UID(podUID))
-	}
-
 	klog.InfoS("node unpublish volume complete", "targetPath", targetPath, "time", time.Since(startTime))
 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }
diff --git a/pkg/secrets-store/nodeserver_test.go b/pkg/secrets-store/nodeserver_test.go
index cef12021f..07c4957df 100644
--- a/pkg/secrets-store/nodeserver_test.go
+++ b/pkg/secrets-store/nodeserver_test.go
@@ -24,7 +24,6 @@ import (
 	"time"
 
 	secretsstorev1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
 	"sigs.k8s.io/secrets-store-csi-driver/pkg/secrets-store/mocks"
 	providerfake "sigs.k8s.io/secrets-store-csi-driver/provider/fake"
 
@@ -33,14 +32,13 @@ import (
 	"google.golang.org/grpc/status"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	fakeclient "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
 	mount "k8s.io/mount-utils"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
-func testNodeServer(t *testing.T, client client.Client, reporter StatsReporter) (*nodeServer, error) {
+func testNodeServer(t *testing.T, client client.Client, reporter StatsReporter, rotationConfig *rotationConfig) (*nodeServer, error) {
 	t.Helper()
 
 	// Create a mock provider named "provider1".
@@ -56,7 +54,7 @@ func testNodeServer(t *testing.T, client client.Client, reporter StatsReporter)
 	t.Cleanup(server.Stop)
 
 	providerClients := NewPluginClientBuilder([]string{socketPath})
-	return newNodeServer("testnode", mount.NewFakeMounter([]mount.MountPoint{}), providerClients, client, client, reporter, k8s.NewTokenClient(fakeclient.NewSimpleClientset(), "test-driver", 1*time.Second))
+	return newNodeServer("testnode", mount.NewFakeMounter([]mount.MountPoint{}), providerClients, client, client, reporter, rotationConfig)
 }
 
 func TestNodePublishVolume_Errors(t *testing.T) {
@@ -230,7 +228,7 @@ func TestNodePublishVolume_Errors(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			r := mocks.NewFakeReporter()
 
-			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).WithObjects(test.initObjects...).Build(), r)
+			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).WithObjects(test.initObjects...).Build(), r, &rotationConfig{})
 			if err != nil {
 				t.Fatalf("expected error to be nil, got: %+v", err)
 			}
@@ -270,6 +268,7 @@ func TestNodePublishVolume(t *testing.T) {
 		name              string
 		nodePublishVolReq *csi.NodePublishVolumeRequest
 		initObjects       []client.Object
+		rotationConfig    *rotationConfig
 	}{
 		{
 			name: "volume mount",
@@ -297,9 +296,13 @@ func TestNodePublishVolume(t *testing.T) {
 					},
 				},
 			},
+			rotationConfig: &rotationConfig{
+				enabled:              false,
+				rotationPollInterval: time.Minute,
+			},
 		},
 		{
-			name: "volume mount with refresh token",
+			name: "volume mount with refresh token",
 			nodePublishVolReq: &csi.NodePublishVolumeRequest{
 				VolumeCapability: &csi.VolumeCapability{},
 				VolumeId:         "testvolid1",
@@ -327,6 +330,41 @@ func TestNodePublishVolume(t *testing.T) {
 					},
 				},
 			},
+			rotationConfig: &rotationConfig{
+				enabled:              true,
+				rotationPollInterval: -1 * time.Minute, // Using negative interval to pass the rotation interval check in unit tests
+			},
+		},
+		{
+			name: "volume mount with rotation but skipped",
+			nodePublishVolReq: &csi.NodePublishVolumeRequest{
+				VolumeCapability: &csi.VolumeCapability{},
+				VolumeId:         "testvolid1",
+				TargetPath:       targetPath(t),
+				VolumeContext: map[string]string{
+					"secretProviderClass": "provider1",
+					CSIPodName:            "pod1",
+					CSIPodNamespace:       "default",
+					CSIPodUID:             "poduid1",
+				},
+				Readonly: true,
+			},
+			initObjects: []client.Object{
+				&secretsstorev1.SecretProviderClass{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "provider1",
+						Namespace: "default",
+					},
+					Spec: secretsstorev1.SecretProviderClassSpec{
+						Provider:   "provider1",
+						Parameters: map[string]string{"parameter1": "value1"},
+					},
+				},
+			},
+			rotationConfig: &rotationConfig{
+				enabled:              true,
+				rotationPollInterval: time.Minute,
+			},
 		},
 	}
 
@@ -341,7 +379,7 @@ func TestNodePublishVolume(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			r := mocks.NewFakeReporter()
 
-			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).WithObjects(test.initObjects...).Build(), r)
+			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).WithObjects(test.initObjects...).Build(), r, test.rotationConfig)
 			if err != nil {
 				t.Fatalf("expected error to be nil, got: %+v", err)
 			}
@@ -368,8 +406,13 @@ func TestNodePublishVolume(t *testing.T) {
 				if err != nil {
 					t.Fatalf("expected err to be nil, got: %v", err)
 				}
-				if len(mnts) == 0 {
-					t.Errorf("expected mounts...: %v", mnts)
+				expectedMounts := 1
+				if ns.rotationConfig.enabled && ns.rotationConfig.rotationPollInterval > 0 {
+					// If due to rotation interval, NodePublishVolume has skipped, there should not be any mount operation
+					expectedMounts = 0
+				}
+				if len(mnts) != expectedMounts {
+					t.Errorf("[Number of mounts] want : %d, got mount: %d", expectedMounts, len(mnts))
 				}
 			}
 		})
@@ -384,7 +427,7 @@ func TestNodeUnpublishVolume(t *testing.T) {
 	)
 
 	r := mocks.NewFakeReporter()
-	ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).Build(), r)
+	ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).Build(), r, &rotationConfig{})
 	if err != nil {
 		t.Fatalf("expected error to be nil, got: %+v", err)
 	}
@@ -463,7 +506,7 @@ func TestNodeUnpublishVolume_Error(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			r := mocks.NewFakeReporter()
-			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).Build(), r)
+			ns, err := testNodeServer(t, fake.NewClientBuilder().WithScheme(s).Build(), r, &rotationConfig{})
 			if err != nil {
 				t.Fatalf("expected error to be nil, got: %+v", err)
 			}
diff --git a/pkg/secrets-store/secrets-store.go b/pkg/secrets-store/secrets-store.go
index 7420b53b4..0c3d70518 100644
--- a/pkg/secrets-store/secrets-store.go
+++ b/pkg/secrets-store/secrets-store.go
@@ -19,8 +19,8 @@ package secretsstore
 import (
 	"context"
 	"os"
+	"time"
 
-	"sigs.k8s.io/secrets-store-csi-driver/pkg/k8s"
 	"sigs.k8s.io/secrets-store-csi-driver/pkg/version"
 
 	"k8s.io/klog/v2"
@@ -38,11 +38,16 @@ type SecretsStore struct {
 	ids *identityServer
 }
 
+// rotationConfig stores the information required to rotate the secrets.
+type rotationConfig struct {
+	enabled              bool
+	rotationPollInterval time.Duration
+}
+
 func NewSecretsStoreDriver(driverName, nodeID, endpoint string,
 	providerClients *PluginClientBuilder,
 	client client.Client,
-	reader client.Reader,
-	tokenClient *k8s.TokenClient) *SecretsStore {
+	reader client.Reader, rotationEnabled bool, rotationPollInterval time.Duration) *SecretsStore {
 	klog.InfoS("Initializing Secrets Store CSI Driver", "driver", driverName, "version", version.BuildVersion, "buildTime", version.BuildTime)
 
 	sr, err := NewStatsReporter()
@@ -50,7 +55,9 @@ func NewSecretsStoreDriver(driverName, nodeID, endpoint string,
 		klog.ErrorS(err, "failed to initialize stats reporter")
 		os.Exit(1)
 	}
-	ns, err := newNodeServer(nodeID, mount.New(""), providerClients, client, reader, sr, tokenClient)
+
+	rc := newRotationConfig(rotationEnabled, rotationPollInterval)
+	ns, err := newNodeServer(nodeID, mount.New(""), providerClients, client, reader, sr, rc)
 	if err != nil {
 		klog.ErrorS(err, "failed to initialize node server")
 		os.Exit(1)
@@ -70,7 +77,7 @@ func newNodeServer(nodeID string,
 	client client.Client,
 	reader client.Reader,
 	statsReporter StatsReporter,
-	tokenClient *k8s.TokenClient) (*nodeServer, error) {
+	rotationConfig *rotationConfig) (*nodeServer, error) {
 	return &nodeServer{
 		mounter:         mounter,
 		reporter:        statsReporter,
@@ -78,10 +85,17 @@ func newNodeServer(nodeID string,
 		client:          client,
 		reader:          reader,
 		providerClients: providerClients,
-		tokenClient:     tokenClient,
+		rotationConfig:  rotationConfig,
 	}, nil
 }
 
+func newRotationConfig(enabled bool, interval time.Duration) *rotationConfig {
+	return &rotationConfig{
+		enabled:              enabled,
+		rotationPollInterval: interval,
+	}
+}
+
 // Run starts the CSI plugin
 func (s *SecretsStore) Run(ctx context.Context) {
 	server := NewNonBlockingGRPCServer()
diff --git a/pkg/secrets-store/utils.go b/pkg/secrets-store/utils.go
index e709b8033..af3799785 100644
--- a/pkg/secrets-store/utils.go
+++ b/pkg/secrets-store/utils.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"os"
 	"strings"
+	"time"
 
 	secretsstorev1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 	"sigs.k8s.io/secrets-store-csi-driver/pkg/util/runtimeutil"
@@ -75,6 +76,14 @@ func (ns *nodeServer) ensureMountPoint(target string) (bool, error) {
 	return false, nil
 }
 
+func (ns *nodeServer) getLastUpdateTime(target string) (time.Time, error) {
+	info, err := os.Stat(target)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return info.ModTime(), nil
+}
+
 // getSecretProviderItem returns the secretproviderclass object by name and namespace
 func getSecretProviderItem(ctx context.Context, c client.Client, name, namespace string) (*secretsstorev1.SecretProviderClass, error) {
 	spc := &secretsstorev1.SecretProviderClass{}
diff --git a/test/bats/aws.bats b/test/bats/aws.bats
index 4144ccb0c..dffbc9fa9 100644
--- a/test/bats/aws.bats
+++ b/test/bats/aws.bats
@@ -81,7 +81,16 @@ teardown_file() {
    [[ "${result//$'\r'}" == "BeforeRotation" ]]
 
    aws ssm put-parameter --name $PM_ROTATION_TEST_NAME --value AfterRotation --type SecureString --overwrite --region $REGION
-   sleep 40
+
+   # Secret rotation is not immediate. So even after 30 seconds, older secret should be there.
+   sleep 30
+
+   result=$(kubectl --namespace $NAMESPACE exec $POD_NAME -- cat /mnt/secrets-store/$PM_ROTATION_TEST_NAME)
+   [[ "${result//$'\r'}" == "BeforeRotation" ]]
+
+   # After a minute, mounted secret should be rotated
+   sleep 60
+
    result=$(kubectl --namespace $NAMESPACE exec $POD_NAME -- cat /mnt/secrets-store/$PM_ROTATION_TEST_NAME)
    [[ "${result//$'\r'}" == "AfterRotation" ]]
 }
@@ -91,7 +100,7 @@ teardown_file() {
    [[ "${result//$'\r'}" == "BeforeRotation" ]]
   
    aws secretsmanager put-secret-value --secret-id $SM_ROT_TEST_NAME --secret-string AfterRotation --region $REGION
-   sleep 40
+   sleep 120
    result=$(kubectl --namespace $NAMESPACE exec $POD_NAME -- cat /mnt/secrets-store/$SM_ROT_TEST_NAME)
    [[ "${result//$'\r'}" == "AfterRotation" ]]
 }
diff --git a/test/bats/e2e-provider.bats b/test/bats/e2e-provider.bats
index a73b27200..dfb942753 100644
--- a/test/bats/e2e-provider.bats
+++ b/test/bats/e2e-provider.bats
@@ -83,8 +83,6 @@ export VALIDATE_TOKENS_AUDIENCE=$(get_token_requests_audience)
   run kubectl get clusterrole/secretproviderclasspodstatuses-viewer-role
   assert_success
 
-  run kubectl get clusterrole/secretproviderrotation-role
-  assert_success
 
   run kubectl get clusterrole/secretprovidersyncing-role
   assert_success
@@ -92,20 +90,8 @@ export VALIDATE_TOKENS_AUDIENCE=$(get_token_requests_audience)
   run kubectl get clusterrolebinding/secretproviderclasses-rolebinding
   assert_success
 
-  run kubectl get clusterrolebinding/secretproviderrotation-rolebinding
-  assert_success
-
   run kubectl get clusterrolebinding/secretprovidersyncing-rolebinding
   assert_success
-
-  # validate token request role and rolebinding only when token requests are set
-  if [[ -n "${VALIDATE_TOKENS_AUDIENCE}" ]]; then
-    run kubectl get clusterrole/secretprovidertokenrequest-role
-    assert_success
-
-    run kubectl get clusterrolebinding/secretprovidertokenrequest-rolebinding
-    assert_success
-  fi
 }
 
 @test "[v1alpha1] deploy e2e-provider secretproviderclass crd" {
@@ -164,6 +150,9 @@ export VALIDATE_TOKENS_AUDIENCE=$(get_token_requests_audience)
 
   result=$(kubectl exec secrets-store-inline-crd -- cat /mnt/secrets-store/$SECRET_NAME)
   [[ "${result//$'\r'}" == "${SECRET_VALUE}" ]]
+
+  sleep 10
+  archive_info
 }
 
 @test "CSI inline volume test with pod portability - read key from pod" {
@@ -403,7 +392,11 @@ export VALIDATE_TOKENS_AUDIENCE=$(get_token_requests_audience)
   local pod_ip=$(kubectl get pod -n kube-system -l app=csi-secrets-store-e2e-provider -o jsonpath="{.items[0].status.podIP}")
   run kubectl exec ${curl_pod_name} -n rotation -- curl http://${pod_ip}:8080/rotation?rotated=true
 
-  sleep 60
+  # wait for rotated secret to be mounted
+  sleep 120
+
+  # Save logs in case of failure in rotation
+  archive_info
 
   result=$(kubectl exec -n rotation secrets-store-inline-rotation -- cat /mnt/secrets-store/$SECRET_NAME)
   [[ "${result//$'\r'}" == "rotated" ]]
@@ -425,7 +418,6 @@ export VALIDATE_TOKENS_AUDIENCE=$(get_token_requests_audience)
     run kubectl exec ${curl_pod_name} -n metrics -- curl http://${pod_ip}:8095/metrics
     assert_match "node_publish_total" "${output}"
     assert_match "node_unpublish_total" "${output}"
-    assert_match "rotation_reconcile_total" "${output}"
   done
   # keeping metrics ns in upgrade tests has no relevance
   # the namespace is only to run a curl pod to validate metrics
diff --git a/test/bats/helpers.bash b/test/bats/helpers.bash
index 8e95e0c8f..9dd70b893 100644
--- a/test/bats/helpers.bash
+++ b/test/bats/helpers.bash
@@ -110,6 +110,8 @@ archive_info() {
   # print detailed pod information
   kubectl describe pods --all-namespaces > ${LOGS_DIR}/pods-describe.txt
 
+  kubectl describe csidriver secrets-store.csi.k8s.io >  ${LOGS_DIR}/csidriver-describe.txt
+
   # print logs from the CSI Driver
   #
   # assumes driver is installed with helm into the `kube-system` namespace which
diff --git a/test/bats/vault.bats b/test/bats/vault.bats
index 6291113f2..79d082ac5 100644
--- a/test/bats/vault.bats
+++ b/test/bats/vault.bats
@@ -100,11 +100,13 @@ EOF
   # update the secret value
   kubectl exec vault-0 --namespace=vault -- vault kv put secret/rotation foo=rotated
 
-  sleep 60
+  sleep 120
 
   # verify rotated value
   result=$(kubectl exec secrets-store-rotation -- cat /mnt/secrets-store/foo)
   [[ "$result" == "rotated" ]]
+
+  archive_info
 }
 
 @test "CSI inline volume test with pod portability - unmount succeeds" {
diff --git a/test/e2eprovider/go.mod b/test/e2eprovider/go.mod
index 4062a3e60..64f716944 100644
--- a/test/e2eprovider/go.mod
+++ b/test/e2eprovider/go.mod
@@ -40,7 +40,7 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
-	golang.org/x/net v0.24.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/sys v0.28.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
diff --git a/test/e2eprovider/go.sum b/test/e2eprovider/go.sum
index e0a2f69f6..3f949f53f 100644
--- a/test/e2eprovider/go.sum
+++ b/test/e2eprovider/go.sum
@@ -101,8 +101,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go
index 075aca5f2..d6700a96f 100644
--- a/test/sanity/sanity_test.go
+++ b/test/sanity/sanity_test.go
@@ -36,7 +36,7 @@ const (
 )
 
 func TestSanity(t *testing.T) {
-	driver := secretsstore.NewSecretsStoreDriver("secrets-store.csi.k8s.io", "somenodeid", endpoint, nil, nil, nil, nil)
+	driver := secretsstore.NewSecretsStoreDriver("secrets-store.csi.k8s.io", "somenodeid", endpoint, nil, nil, nil, false, time.Minute)
 	go func() {
 		driver.Run(context.Background())
 	}()