MCO-1100: enable RHEL entitlements in on-cluster layering with OCL API #4333

Closed
1 change: 1 addition & 0 deletions cmd/machine-config-controller/start.go
@@ -195,6 +195,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle
ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(),
ctx.KubeInformerFactory.Core().V1().Nodes(),
ctx.KubeInformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Machineconfiguration().V1alpha1().MachineOSBuilds(),
ctx.ConfigInformerFactory.Config().V1().Schedulers(),
ctx.ClientBuilder.KubeClientOrDie("node-update-controller"),
ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"),
40 changes: 25 additions & 15 deletions cmd/machine-os-builder/start.go
@@ -7,6 +7,8 @@ import (
"fmt"
"os"

mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"

"github.com/openshift/machine-config-operator/internal/clients"
"github.com/openshift/machine-config-operator/pkg/controller/build"
ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
@@ -37,6 +39,12 @@ func init() {
startCmd.PersistentFlags().StringVar(&startOpts.kubeconfig, "kubeconfig", "", "Kubeconfig file to access a remote cluster (testing only)")
}

func getMachineOSConfigs(ctx context.Context, cb *clients.Builder) (*mcfgv1alpha1.MachineOSConfigList, error) {
mcfgClient := cb.MachineConfigClientOrDie(componentName)
return mcfgClient.MachineconfigurationV1alpha1().MachineOSConfigs().List(ctx, metav1.ListOptions{})
}

// Checks if the on-cluster-build-config ConfigMap exists. If it exists, return the ConfigMap.
// If not, return an error.
func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*corev1.ConfigMap, error) {
@@ -57,13 +65,8 @@ func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*cor

// Creates one BuildController per MachineOSConfig. Each pool can carry its own
// build settings, so each config gets a dedicated controller.
func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Controller, error) {
onClusterBuildConfigMap, err := getBuildControllerConfigMap(ctx, cb)
if err != nil {
return nil, err
}

imageBuilderType, err := build.GetImageBuilderType(onClusterBuildConfigMap)
func getBuildController(ctx context.Context, cb *clients.Builder) ([]*build.Controller, error) {
machineOSConfigs, err := getMachineOSConfigs(ctx, cb)
if err != nil {
return nil, err
}
@@ -72,11 +75,12 @@ func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Contro
buildClients := build.NewClientsFromControllerContext(ctrlCtx)
cfg := build.DefaultBuildControllerConfig()

if imageBuilderType == build.OpenshiftImageBuilder {
return build.NewWithImageBuilder(cfg, buildClients), nil
}
controllersToStart := []*build.Controller{}

return build.NewWithCustomPodBuilder(cfg, buildClients), nil
for range machineOSConfigs.Items {
controllersToStart = append(controllersToStart, build.NewWithCustomPodBuilder(cfg, buildClients))
}
return controllersToStart, nil
}

func runStartCmd(_ *cobra.Command, _ []string) {
@@ -90,13 +94,12 @@ func runStartCmd(_ *cobra.Command, _ []string) {
klog.Infof("Version: %+v (%s)", version.Raw, version.Hash)

ctx, cancel := context.WithCancel(context.Background())

cb, err := clients.NewBuilder("")
if err != nil {
klog.Fatalln(err)
}

ctrl, err := getBuildController(ctx, cb)
controllers, err := getBuildController(ctx, cb)
if err != nil {
klog.Fatalln(err)
var invalidImageBuilder *build.ErrInvalidImageBuilder
@@ -107,7 +110,14 @@ func runStartCmd(_ *cobra.Command, _ []string) {
}
}

go ctrl.Run(ctx, 5)
<-ctx.Done()
// Since users can specify different settings per pool, we run one controller
// per pool. Otherwise, settings would be conflated, as would failures and builds.
for _, ctrl := range controllers {
go ctrl.Run(ctx, 3)
}
<-ctx.Done()

cancel()

}
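The error-handling branch above is partially collapsed by the diff view. For readers tracing it, the standard errors.As pattern that a typed error such as *build.ErrInvalidImageBuilder implies would look roughly like the sketch below; this is illustrative only, not the PR's exact code, and the helper name and log messages are invented.

package example

import (
    "errors"

    "github.com/openshift/machine-config-operator/pkg/controller/build"
    "k8s.io/klog/v2"
)

// handleBuildControllerErr shows the errors.As check a typed error like
// *build.ErrInvalidImageBuilder is meant for: report a misconfigured
// imageBuilderType distinctly, then fail hard on anything else.
func handleBuildControllerErr(err error) {
    if err == nil {
        return
    }
    var invalidImageBuilder *build.ErrInvalidImageBuilder
    if errors.As(err, &invalidImageBuilder) {
        klog.Fatalf("invalid image builder type: %v", err)
    }
    klog.Fatalln(err)
}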
20 changes: 20 additions & 0 deletions install/0000_80_machine-config_00_clusterreader_clusterrole.yaml
@@ -56,3 +56,23 @@ rules:
- get
- list
- watch
- apiGroups:
- machineconfiguration.openshift.io
resources:
- machineosconfigs
- machineosconfigs/status
verbs:
- create
- update
- patch
- get
- apiGroups:
- machineconfiguration.openshift.io
resources:
- machineosbuilds
- machineosbuilds/status
verbs:
- create
- update
- patch
- get
6 changes: 6 additions & 0 deletions manifests/machineconfigcontroller/clusterrole.yaml
@@ -42,6 +42,12 @@ rules:
- apiGroups: ["operator.openshift.io"]
resources: ["machineconfigurations"]
verbs: ["get","list","watch"]
- apiGroups: ["machineconfiguration.openshift.io"]
resources: ["machineosconfigs", "machineosconfigs/status"]
verbs: ["create", "update", "patch", "get"]
- apiGroups: ["machineconfiguration.openshift.io"]
resources: ["machineosbuilds", "machineosbuilds/status"]
verbs: ["create", "update", "patch", "get"]
- apiGroups:
- authentication.k8s.io
resources:
79 changes: 79 additions & 0 deletions pkg/apihelpers/machineosbuild_apihelpers.go
@@ -0,0 +1,79 @@
package apihelpers

import (
mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NewMachineOSBuildCondition creates a new MachineOSBuild condition.
func NewMachineOSBuildCondition(condType string, status metav1.ConditionStatus, reason, message string) *metav1.Condition {
return &metav1.Condition{
Type: condType,
Status: status,
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}

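// GetMachineOSBuildCondition returns the condition of the given type from the MachineOSBuild status, or nil if it is not present.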
func GetMachineOSBuildCondition(status mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) *metav1.Condition {
// In case of sync errors, return the last condition that matches rather than
// the first; this guards against duplicate conditions introduced by races.
var latestState *metav1.Condition
for i := range status.Conditions {
c := status.Conditions[i]
if mcfgv1alpha1.BuildProgress(c.Type) == condType {
latestState = &c
}
}
return latestState
}

// SetMachineOSBuildCondition updates the MachineOSBuild status to include the provided condition. If the condition
// already exists with the same status, reason, and message, no update is made.
func SetMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condition metav1.Condition) {
currentCond := GetMachineOSBuildCondition(*status, mcfgv1alpha1.BuildProgress(condition.Type))
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && currentCond.Message == condition.Message {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}

// Drop any existing condition of this type before appending the updated one.
newConditions := filterOutMachineOSBuildCondition(status.Conditions, mcfgv1alpha1.BuildProgress(condition.Type))
status.Conditions = append(newConditions, condition)
}

// RemoveMachineOSBuildCondition removes the MachineOSBuild condition with the provided type.
func RemoveMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) {
status.Conditions = filterOutMachineOSBuildCondition(status.Conditions, condType)
}

// filterOutMachineOSBuildCondition returns a new slice of MachineOSBuild conditions without conditions with the provided type.
func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mcfgv1alpha1.BuildProgress) []metav1.Condition {
var newConditions []metav1.Condition
for _, c := range conditions {
if mcfgv1alpha1.BuildProgress(c.Type) == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}

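// IsMachineOSBuildConditionTrue returns true when the condition of the given type is present and set to metav1.ConditionTrue.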
func IsMachineOSBuildConditionTrue(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress) bool {
return IsMachineOSBuildConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue)
}

// IsMachineOSBuildConditionPresentAndEqual returns true when conditionType is present and equal to status.
func IsMachineOSBuildConditionPresentAndEqual(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress, status metav1.ConditionStatus) bool {
for _, condition := range conditions {
if mcfgv1alpha1.BuildProgress(condition.Type) == conditionType {
return condition.Status == status
}
}
return false
}
20 changes: 10 additions & 10 deletions pkg/controller/build/assets/Dockerfile.on-cluster-build-template
@@ -5,22 +5,22 @@
# Decode and extract the MachineConfig from the gzipped ConfigMap and move it
# into position. We do this in a separate stage so that we don't have the
# gzipped MachineConfig laying around.
FROM {{.BaseImage.Pullspec}} AS extract
FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS extract
COPY ./machineconfig/machineconfig.json.gz /tmp/machineconfig.json.gz
RUN mkdir -p /etc/machine-config-daemon && \
cat /tmp/machineconfig.json.gz | base64 -d | gunzip - > /etc/machine-config-daemon/currentconfig

{{if .ExtensionsImage.Pullspec}}
{{if .MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}}
# Pull our extensions image. Not sure yet what / how this should be wired up
# though. Ideally, I'd like to use some Buildah tricks to have the extensions
# directory mounted into the container at build-time so that I don't have to
# copy the RPMs into the container, configure the repo, and do the
# installation. Alternatively, I'd have to start a pod with an HTTP server.
FROM {{.ExtensionsImage.Pullspec}} AS extensions
FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}} AS extensions
{{end}}


FROM {{.BaseImage.Pullspec}} AS configs
FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS configs
# Copy the extracted MachineConfig into the expected place in the image.
COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config-daemon/currentconfig
# Do the ignition live-apply, extracting the Ignition config from the MachineConfig.
@@ -29,11 +29,11 @@ COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config
RUN container="oci" exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported <(cat /etc/machine-config-daemon/currentconfig | jq '.spec.config') && \
ostree container commit

LABEL machineconfig={{.Pool.Spec.Configuration.Name}}
LABEL machineconfigpool={{.Pool.Name}}
LABEL releaseversion={{.ReleaseVersion}}
LABEL baseOSContainerImage={{.BaseImage.Pullspec}}
LABEL machineconfig={{.MachineOSBuild.Spec.DesiredConfig.Name}}
LABEL machineconfigpool={{.MachineOSConfig.Spec.MachineConfigPool.Name}}
LABEL releaseversion={{.MachineOSConfig.Spec.BuildInputs.ReleaseVersion}}
LABEL baseOSContainerImage={{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}}

{{if .CustomDockerfile}}
{{.CustomDockerfile}}
{{if .Containerfile}}
{{.Containerfile}}
{{end}}
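For readers tracing the template fields above back to Go: a sketch of the data the template appears to be executed against. The struct and field names below are inferred from the template itself, not copied from the PR's Go code.

package example

import mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"

// dockerfileTemplateData mirrors the fields the template references:
// .MachineOSConfig.Spec.BuildInputs.*, .MachineOSBuild.Spec.DesiredConfig.Name,
// and the optional user-supplied .Containerfile appended at the end.
type dockerfileTemplateData struct {
    MachineOSConfig *mcfgv1alpha1.MachineOSConfig
    MachineOSBuild  *mcfgv1alpha1.MachineOSBuild
    Containerfile   string
}

Rendering the Dockerfile is then a plain text/template Execute call against this struct.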
62 changes: 56 additions & 6 deletions pkg/controller/build/assets/buildah-build.sh
@@ -5,6 +5,10 @@
# custom build pod.
set -xeuo pipefail

ETC_PKI_ENTITLEMENT_MOUNTPOINT="${ETC_PKI_ENTITLEMENT_MOUNTPOINT:-}"
ETC_PKI_RPM_GPG_MOUNTPOINT="${ETC_PKI_RPM_GPG_MOUNTPOINT:-}"
ETC_YUM_REPOS_D_MOUNTPOINT="${ETC_YUM_REPOS_D_MOUNTPOINT:-}"

build_context="$HOME/context"

# Create a directory to hold our build context.
@@ -14,12 +18,58 @@ mkdir -p "$build_context/machineconfig"
cp /tmp/dockerfile/Dockerfile "$build_context"
cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/"

# Build our image using Buildah.
buildah bud \
--storage-driver vfs \
--authfile="$BASE_IMAGE_PULL_CREDS" \
--tag "$TAG" \
--file="$build_context/Dockerfile" "$build_context"
build_args=(
--log-level=DEBUG
--storage-driver vfs
--authfile="$BASE_IMAGE_PULL_CREDS"
--tag "$TAG"
--file="$build_context/Dockerfile"
)

mount_opts="z,rw"

# If we have RHSM certs, copy them into a tempdir to avoid SELinux issues, and
# tell Buildah about them.
rhsm_path="/var/run/secrets/rhsm"
if [[ -d "$rhsm_path" ]]; then
rhsm_certs="$(mktemp -d)"
cp -r -v "$rhsm_path/." "$rhsm_certs"
chmod -R 0755 "$rhsm_certs"
build_args+=("--volume=$rhsm_certs:/run/secrets/rhsm:$mount_opts")
fi

# If we have /etc/pki/entitlement certificates, commonly used with RHEL
# entitlements, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]]; then
configs="$(mktemp -d)"
cp -r -v "$ETC_PKI_ENTITLEMENT_MOUNTPOINT/." "$configs"
chmod -R 0755 "$configs"
build_args+=("--volume=$configs:$ETC_PKI_ENTITLEMENT_MOUNTPOINT:$mount_opts")
fi

# If we have /etc/yum.repos.d configs, commonly used with Red Hat Satellite
# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_YUM_REPOS_D_MOUNTPOINT" ]] && [[ -d "$ETC_YUM_REPOS_D_MOUNTPOINT" ]]; then
configs="$(mktemp -d)"
cp -r -v "$ETC_YUM_REPOS_D_MOUNTPOINT/." "$configs"
chmod -R 0755 "$configs"
build_args+=("--volume=$configs:$ETC_YUM_REPOS_D_MOUNTPOINT:$mount_opts")
fi

# If we have /etc/pki/rpm-gpg configs, commonly used with Red Hat Satellite
# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]]; then
configs="$(mktemp -d)"
cp -r -v "$ETC_PKI_RPM_GPG_MOUNTPOINT/." "$configs"
chmod -R 0755 "$configs"
build_args+=("--volume=$configs:$ETC_PKI_RPM_GPG_MOUNTPOINT:$mount_opts")
fi

# Build our image.
buildah bud "${build_args[@]}" "$build_context"

# Push our built image.
buildah push \
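The script reads the three *_MOUNTPOINT variables from its environment, so the build pod has to export them alongside the corresponding volume mounts. A sketch of that wiring follows; the env var names match the script, while the mount paths and the pod-spec plumbing are assumptions not shown in this diff.

package example

import corev1 "k8s.io/api/core/v1"

// entitlementEnvVars points buildah-build.sh at the locations where the
// entitlement certs, Satellite repo definitions, and RPM GPG keys are mounted
// inside the build pod. Variables left empty are skipped by the script.
func entitlementEnvVars() []corev1.EnvVar {
    return []corev1.EnvVar{
        {Name: "ETC_PKI_ENTITLEMENT_MOUNTPOINT", Value: "/etc/pki/entitlement"},
        {Name: "ETC_YUM_REPOS_D_MOUNTPOINT", Value: "/etc/yum.repos.d"},
        {Name: "ETC_PKI_RPM_GPG_MOUNTPOINT", Value: "/etc/pki/rpm-gpg"},
    }
}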