Added ability to limit the LoadBalancers picked up by chisel-operator #153

Open · wants to merge 8 commits into base: main
1 change: 1 addition & 0 deletions .well-known/funding-manifest-urls
@@ -0,0 +1 @@
https://fyralabs.com/funding.json
5 changes: 4 additions & 1 deletion charts/chisel-operator/templates/deployment.yaml
@@ -26,13 +26,15 @@ spec:
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "chisel-operator.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
env:
- name: "REQUIRE_OPERATOR_CLASS"
value: "{{.Values.global.requireOperatorClass}}"

{{- with .Values.nodeSelector }}
nodeSelector:
@@ -47,3 +49,4 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}


5 changes: 5 additions & 0 deletions charts/chisel-operator/values.yaml
@@ -7,6 +7,11 @@ replicaCount: 1 # Right now only 1 replica is supported
# For now, we recommend running only 1 replica; otherwise Chisel Operator may constantly
# recreate resources, wasting your API resources and costing you money.

# Limit chisel-operator to LoadBalancers that explicitly use its LoadBalancerClass
# Set requireOperatorClass: true to limit the LoadBalancers picked up by chisel-operator
# (an example Service using this class follows this file's diff)
global:
requireOperatorClass: false

image:
repository: ghcr.io/fyralabs/chisel-operator
pullPolicy: IfNotPresent
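For context, a minimal sketch of a Service that chisel-operator would still reconcile when requireOperatorClass is true. The names are illustrative, and the loadBalancerClass value is an assumption: it must match the OPERATOR_CLASS constant compiled into the operator.

apiVersion: v1
kind: Service
metadata:
  name: example-app            # illustrative name
spec:
  type: LoadBalancer
  # assumed value; must match the operator's OPERATOR_CLASS constant
  loadBalancerClass: chisel-operator.io/chisel-operator-class
  selector:
    app: example-app
  ports:
    - port: 80
      targetPort: 8080

With the flag enabled, LoadBalancer Services that leave loadBalancerClass unset are ignored; with the default of false, both class-less Services and Services carrying the operator class are picked up.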
1 change: 1 addition & 0 deletions site/public/.well-known/funding-manifest-urls
@@ -0,0 +1 @@
https://fyralabs.com/funding.json
11 changes: 8 additions & 3 deletions src/cloud/aws.rs
@@ -112,7 +112,7 @@ impl Provisioner for AWSProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
) -> color_eyre::Result<ExitNodeStatus> {
) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
let provisioner = exit_node
.metadata
.annotations
@@ -127,6 +127,8 @@ impl Provisioner for AWSProvisioner {

let password = generate_password(32);

let secret = exit_node.generate_secret(password.clone()).await?;

let cloud_init_config = generate_cloud_init_config(&password, CHISEL_PORT);
let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config);

@@ -222,7 +224,7 @@ impl Provisioner for AWSProvisioner {
instance.instance_id.map(|id| id.to_string()).as_deref(),
);

Ok(exit_node)
Ok((exit_node, secret))
}

async fn update_exit_node(
@@ -268,7 +270,10 @@ impl Provisioner for AWSProvisioner {
} else {
warn!("No status found for exit node, creating new instance");
// TODO: this should be handled by the controller logic
return self.create_exit_node(auth, exit_node).await;
return self
.create_exit_node(auth, exit_node)
.await
.map(|(status, _)| status);
}
}

9 changes: 5 additions & 4 deletions src/cloud/digitalocean.rs
@@ -61,12 +61,12 @@ impl Provisioner for DigitalOceanProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
) -> color_eyre::Result<ExitNodeStatus> {
) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
let password = generate_password(32);

// create secret for password too

let _secret = exit_node.generate_secret(password.clone()).await?;
let secret = exit_node.generate_secret(password.clone()).await?;

let config = generate_cloud_init_config(&password, exit_node.spec.port);

@@ -138,7 +138,7 @@ impl Provisioner for DigitalOceanProvisioner {
Some(&droplet_id),
);

Ok(exit_node)
Ok((exit_node, secret))
}

async fn update_exit_node(
@@ -170,7 +170,8 @@ impl Provisioner for DigitalOceanProvisioner {
} else {
warn!("No status found for exit node, creating new droplet");
// TODO: this should be handled by the controller logic
return self.create_exit_node(auth, exit_node).await;
let (status, _) = self.create_exit_node(auth, exit_node).await?;
return Ok(status);
}
}

12 changes: 8 additions & 4 deletions src/cloud/linode.rs
@@ -54,10 +54,11 @@ impl Provisioner for LinodeProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
) -> color_eyre::Result<ExitNodeStatus> {
) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
let password = generate_password(32);

let _secret = exit_node.generate_secret(password.clone()).await?;
// Password for the server
let secret = exit_node.generate_secret(password.clone()).await?;

let config = generate_cloud_init_config(&password, exit_node.spec.port);

@@ -126,7 +127,7 @@ impl Provisioner for LinodeProvisioner {
Some(&instance.id.to_string()),
);

Ok(status)
Ok((status, secret))
}

async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> {
@@ -178,7 +179,10 @@ impl Provisioner for LinodeProvisioner {
Ok(status)
} else {
warn!("No instance status found, creating new instance");
return self.create_exit_node(auth.clone(), exit_node).await;
return self
.create_exit_node(auth.clone(), exit_node)
.await
.map(|(status, _)| status);
}
}
}
3 changes: 2 additions & 1 deletion src/cloud/mod.rs
@@ -27,7 +27,8 @@ pub trait Provisioner {
&self,
auth: Secret,
exit_node: ExitNode,
) -> color_eyre::Result<ExitNodeStatus>;
// Returns the exit node status together with the generated password Secret for the exit node
) -> color_eyre::Result<(ExitNodeStatus, Secret)>;
async fn update_exit_node(
&self,
auth: Secret,
52 changes: 41 additions & 11 deletions src/daemon.rs
@@ -16,10 +16,6 @@
There can also be a case where the user creates an exit node manually,
with the provisioner annotation set; in that case chisel operator will
create a cloud resource for that exit node and manage it.

todo: properly handle all this logic

todo: use `tracing` and put every operation in a span to make debugging easier
*/

use color_eyre::Result;
@@ -46,6 +42,8 @@ use std::{collections::BTreeMap, sync::Arc};
use std::time::Duration;
use tracing::{debug, error, info, instrument, warn};

use std::env;

use crate::{
cloud::Provisioner,
ops::{
@@ -58,6 +56,8 @@ use crate::{deployment::create_owned_deployment, error::ReconcileError};
pub const EXIT_NODE_FINALIZER: &str = "exitnode.chisel-operator.io/finalizer";
pub const SVCS_FINALIZER: &str = "service.chisel-operator.io/finalizer";

// todo: Refactor everything in here into separate functions, then we can write unit tests for them

// pub fn get_trace_id() -> opentelemetry::trace::TraceId {
// // opentelemetry::Context -> opentelemetry::trace::Span
// use opentelemetry::trace::TraceContextExt as _;
@@ -208,7 +208,7 @@ async fn select_exit_node_local(
.unwrap_or(false);

// Is the ExitNode not cloud provisioned or is the status set?
!is_cloud_provisioned || node.status.is_some()
(!is_cloud_provisioned || node.status.is_some()) && !node.is_assigned()
})
.collect::<Vec<ExitNode>>()
.first()
@@ -303,6 +303,13 @@ async fn exit_node_for_service(
async fn reconcile_svcs(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action, ReconcileError> {
// Return if service is not LoadBalancer or if the loadBalancerClass is not blank or set to $OPERATOR_CLASS

// Check if the REQUIRE_OPERATOR_CLASS environment variable is set
let limit_load_balancer_class;
match env::var("REQUIRE_OPERATOR_CLASS") {
Ok(v) => limit_load_balancer_class = v,
Err(_e) => limit_load_balancer_class = "false".to_string(),
}

Review comment (Member): REQUIRE_OPERATOR_CLASS's env var key should be a const, so the name only has to be changed in one place if it ever needs to change.

Review comment (Member): This could be a bit more elegant by using unwrap_or_else instead of a named block.
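A minimal sketch of how the two review suggestions above could be combined; the const name is illustrative and not part of this PR:

// Hypothetical const for the env var key, per the review suggestion above
const REQUIRE_OPERATOR_CLASS_ENV: &str = "REQUIRE_OPERATOR_CLASS";

// unwrap_or_else replaces the named binding and the match block
let limit_load_balancer_class =
    std::env::var(REQUIRE_OPERATOR_CLASS_ENV).unwrap_or_else(|_| "false".to_string());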

// todo: does anything need to be done differently for OpenShift? We use vanilla k8s and k3s/rke2, so we don't know
if obj
.spec
@@ -313,7 +320,7 @@
.spec
.as_ref()
.filter(|spec| {
spec.load_balancer_class.is_none()
(spec.load_balancer_class.is_none() && (limit_load_balancer_class == "false"))
|| spec.load_balancer_class == Some(OPERATOR_CLASS.to_string())
})
.is_none()

Review comment (Member): You could also cast the boolean to a string using FromStr.
@@ -582,6 +589,7 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action
}
}

// Find provisioner
let provisioner = find_exit_node_provisioner_from_label(
ctx.clone(),
&obj.namespace().unwrap(),
@@ -590,6 +598,7 @@
.await
.ok_or(ReconcileError::CloudProvisionerNotFound)?;

// Get provisioner API handle
let provisioner_api = provisioner.clone().spec.get_inner();

let secret = provisioner
@@ -598,6 +607,8 @@
.map_err(|_| crate::error::ReconcileError::CloudProvisionerSecretNotFound)?
.ok_or(ReconcileError::CloudProvisionerSecretNotFound)?;

debug!(?secret, "Secret");

finalizer::finalizer(
&exit_nodes.clone(),
EXIT_NODE_FINALIZER,
@@ -607,21 +618,40 @@
{
Event::Apply(node) => {
let _node = {
let mut pass_secret: Option<k8s_openapi::api::core::v1::Secret> = None;
// if status exists, update, else create
let cloud_resource = if let Some(_status) = node.status.as_ref() {
info!("Updating cloud resource for {}", node.name_any());
provisioner_api
.update_exit_node(secret.clone(), (*node).clone())
.await
} else {
// todo: probably update the Provisioner trait to accept a provisioner API handle or
// the provisioner API token *and* then a password secret
// Right now we have the create_exit_node method which returns the password secret alongside the status

// create cloud resource
info!("Creating cloud resource for {}", node.name_any());
provisioner_api

let (resource, new_pass_secret) = provisioner_api
.create_exit_node(secret.clone(), (*node).clone())
.await
.await?;
pass_secret = Some(new_pass_secret);
Ok(resource)
};
// TODO: Don't replace the entire status and object, sadly JSON is better here
let exitnode_patch = serde_json::json!({
"status": cloud_resource?
});
let exitnode_patch = if let Some(p_secret) = pass_secret {
serde_json::json!({
"status": cloud_resource?,
"spec": {
"auth": p_secret.name_any(),
}
})
} else {
serde_json::json!({
"status": cloud_resource?,
})
};

exit_nodes
.patch_status(
21 changes: 21 additions & 0 deletions src/ops.rs
@@ -88,6 +88,7 @@ impl ExitNode {
///
/// Generates a new secret with the `auth` key containing the auth string for chisel in the same namespace as the ExitNode
pub async fn generate_secret(&self, password: String) -> Result<Secret> {
debug!("Generating secret for ExitNode");
let secret_name = self.get_secret_name();

let auth_tmpl = format!("{}:{}", crate::cloud::pwgen::DEFAULT_USERNAME, password);
@@ -127,7 +128,27 @@

Ok(secret)
}

/// Checks if the exit node is already assigned to a service
pub fn is_assigned(&self) -> bool {
self.metadata
.annotations
.as_ref()
.map(|annotations| annotations.contains_key(EXIT_NODE_NAME_LABEL))
.unwrap_or(false)
}

/// Gets the IP address of the exit node
pub fn get_ip(&self) -> Option<String> {
self.status.as_ref().map(|status| status.ip.clone())
}

/// Gets the name of the exit node
pub fn get_name(&self) -> Option<String> {
self.metadata.name.clone()
}
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
pub struct ExitNodeStatus {
pub provider: String,