
Commit 8a5cb7c

Support for taking ownership of resources managed by k0s

Signed-off-by: Xinzhao Xu <[email protected]>

Parent: c57f875

File tree: 3 files changed (+11 -2 lines)

- docs/manifests.md
- pkg/applier/meta.go
- pkg/applier/stack.go

docs/manifests.md (1 addition, 1 deletion)

@@ -15,7 +15,7 @@ The use of Manifest Deployer is quite similar to the use the `kubectl apply` com
 - Each directory that is a direct descendant of `/var/lib/k0s/manifests` is considered to be its own "stack". Nested directories (further subfolders), however, are excluded from the stack mechanism and thus are not automatically deployed by the Manifest Deployer.
 
 - k0s uses the independent stack mechanism for some of its internal in-cluster components, as well as for other resources. Be sure to only touch the manifests that are not managed by k0s.
-
+- If you want to take ownership of certain resources, you can add the `k0s.k0sproject.io/managed: false` label to those resources, indicating that they are no longer managed by k0s and k0s will not update them anymore.
 - Explicitly define the namespace in the manifests (Manifest Deployer does not have a default namespace).
 
 ## Example
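The added bullet is the user-facing side of the change: set the `k0s.k0sproject.io/managed: false` label on a resource (for example with `kubectl label`, or by editing its manifest) and k0s stops reconciling it. Below is a rough client-go sketch of the same operation; the ConfigMap name, namespace, and kubeconfig path are placeholders for illustration, not anything defined by this commit.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig; the path here is illustrative only.
	config, err := clientcmd.BuildConfigFromFlags("", "/var/lib/k0s/pki/admin.conf")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Merge-patch the "managed" label onto a placeholder ConfigMap so that
	// k0s no longer updates (and, with this commit, no longer prunes) it.
	patch := []byte(`{"metadata":{"labels":{"k0s.k0sproject.io/managed":"false"}}}`)
	_, err = client.CoreV1().ConfigMaps("kube-system").Patch(
		context.TODO(), "my-config", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("took ownership of kube-system/my-config")
}
```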

pkg/applier/meta.go (3 additions, 0 deletions)

@@ -29,6 +29,9 @@ const (
 	// NameLabel stack label
 	NameLabel = MetaPrefix + "/stack"
 
+	// ManagedLabel defines the label key used to indicate whether a resource is managed by k0s.
+	ManagedLabel = MetaPrefix + "/managed"
+
 	// ChecksumAnnotation defines the annotation key used for stack checksums
 	ChecksumAnnotation = MetaPrefix + "/stack-checksum"
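For orientation: the label documented in manifests.md is `k0s.k0sproject.io/managed`, which suggests `MetaPrefix` is `"k0s.k0sproject.io"` (its definition is elsewhere in meta.go and not part of this diff). Under that assumption the new constant resolves as follows:

```go
// Assumed value, inferred from the documented label; not shown in this diff.
const MetaPrefix = "k0s.k0sproject.io"

// The new constant then evaluates to "k0s.k0sproject.io/managed",
// the exact key users set on their manifests and that stack.go checks below.
const ManagedLabel = MetaPrefix + "/managed"
```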

pkg/applier/stack.go (7 additions, 1 deletion)

@@ -114,6 +114,11 @@ func (s *Stack) Apply(ctx context.Context, prune bool) error {
 			errs = append(errs, err)
 			continue
 		} else { // The resource already exists, we need to update/patch it
+			if serverResource.GetLabels()[ManagedLabel] == "false" {
+				s.log.Debug("resource is not managed by k0s, skipping")
+				continue
+			}
+
 			localChecksum := resource.GetAnnotations()[ChecksumAnnotation]
 			if serverResource.GetAnnotations()[ChecksumAnnotation] == localChecksum {
 				s.log.Debug("resource checksums match, no need to update")
@@ -385,7 +390,8 @@ func (s *Stack) getPruneableResources(ctx context.Context, drClient dynamic.Reso
 	for _, resource := range resourceList.Items {
 		// We need to filter out objects that do not actually have the stack label set
 		// There are some cases where we get "extra" results, e.g.: https://github.com/kubernetes-sigs/metrics-server/issues/604
-		if !s.isInStack(resource) && len(resource.GetOwnerReferences()) == 0 && resource.GetLabels()[NameLabel] == s.Name {
+		labels := resource.GetLabels()
+		if !s.isInStack(resource) && len(resource.GetOwnerReferences()) == 0 && labels[NameLabel] == s.Name && labels[ManagedLabel] != "false" {
 			s.log.Debugf("adding prunable resource: %s", generateResourceID(resource))
 			pruneableResources = append(pruneableResources, resource)
 		}
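Both hunks hinge on the same string comparison: a resource labeled `k0s.k0sproject.io/managed: false` is neither patched in `Apply` nor collected by `getPruneableResources`. Here is a minimal, self-contained sketch of that check; the helper name and the hard-coded label key are mine, not part of the commit.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// Mirrors applier.ManagedLabel; hard-coded only to keep the sketch self-contained.
const managedLabel = "k0s.k0sproject.io/managed"

// isUnmanaged reports whether the user has opted the resource out of k0s
// management. Only the exact string "false" opts out; a missing label or any
// other value leaves the resource managed.
func isUnmanaged(obj unstructured.Unstructured) bool {
	return obj.GetLabels()[managedLabel] == "false"
}

func main() {
	cm := unstructured.Unstructured{Object: map[string]interface{}{}}
	cm.SetKind("ConfigMap")
	cm.SetName("my-config")
	cm.SetLabels(map[string]string{managedLabel: "false"})

	// Apply would skip patching this object, and getPruneableResources would
	// never add it to the prune list.
	fmt.Println(isUnmanaged(cm)) // true
}
```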
