diff --git a/.well-known/funding-manifest-urls b/.well-known/funding-manifest-urls new file mode 100644 index 0000000..0125972 --- /dev/null +++ b/.well-known/funding-manifest-urls @@ -0,0 +1 @@ +https://fyralabs.com/funding.json \ No newline at end of file diff --git a/charts/chisel-operator/templates/deployment.yaml b/charts/chisel-operator/templates/deployment.yaml index af36049..5dd5cc0 100644 --- a/charts/chisel-operator/templates/deployment.yaml +++ b/charts/chisel-operator/templates/deployment.yaml @@ -26,13 +26,15 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "chisel-operator.serviceAccountName" . }} containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} resources: {{- toYaml .Values.resources | nindent 12 }} + env: + - name: "REQUIRE_OPERATOR_CLASS" + value: "{{.Values.global.requireOperatorClass}}" {{- with .Values.nodeSelector }} nodeSelector: @@ -47,3 +49,4 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} + \ No newline at end of file diff --git a/charts/chisel-operator/values.yaml b/charts/chisel-operator/values.yaml index 0bab700..321ffed 100644 --- a/charts/chisel-operator/values.yaml +++ b/charts/chisel-operator/values.yaml @@ -7,6 +7,11 @@ replicaCount: 1 # Right now only 1 replica is supported # For now, we recommend running only 1 replica else Chisel Operator may constantly # recreate resources, wasting your API resources and costing you money. 
+# Add the ability to limit to just the chisel operator LoadBalancerClass +# Set requireOperatorClass: true to limit the LoadBalancers picked up by chisel-operator +global: + requireOperatorClass: false + image: repository: ghcr.io/fyralabs/chisel-operator pullPolicy: IfNotPresent diff --git a/site/public/.well-known/funding-manifest-urls b/site/public/.well-known/funding-manifest-urls new file mode 100644 index 0000000..0125972 --- /dev/null +++ b/site/public/.well-known/funding-manifest-urls @@ -0,0 +1 @@ +https://fyralabs.com/funding.json \ No newline at end of file diff --git a/src/cloud/aws.rs b/src/cloud/aws.rs index f723104..fa50c8d 100644 --- a/src/cloud/aws.rs +++ b/src/cloud/aws.rs @@ -112,7 +112,7 @@ impl Provisioner for AWSProvisioner { &self, auth: Secret, exit_node: ExitNode, - ) -> color_eyre::Result { + ) -> color_eyre::Result<(ExitNodeStatus, Secret)> { let provisioner = exit_node .metadata .annotations @@ -127,6 +127,8 @@ impl Provisioner for AWSProvisioner { let password = generate_password(32); + let secret = exit_node.generate_secret(password.clone()).await?; + let cloud_init_config = generate_cloud_init_config(&password, CHISEL_PORT); let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config); @@ -222,7 +224,7 @@ impl Provisioner for AWSProvisioner { instance.instance_id.map(|id| id.to_string()).as_deref(), ); - Ok(exit_node) + Ok((exit_node, secret)) } async fn update_exit_node( &self, auth: Secret, @@ -268,7 +270,10 @@ impl Provisioner for AWSProvisioner { } else { warn!("No status found for exit node, creating new instance"); // TODO: this should be handled by the controller logic - return self.create_exit_node(auth, exit_node).await; + return self .create_exit_node(auth, exit_node) .await .map(|(status, _)| status); } } diff --git a/src/cloud/digitalocean.rs b/src/cloud/digitalocean.rs index eeea31c..d58d987 100644 --- a/src/cloud/digitalocean.rs +++ b/src/cloud/digitalocean.rs @@ -61,12 +61,12 @@ impl Provisioner for
DigitalOceanProvisioner { &self, auth: Secret, exit_node: ExitNode, - ) -> color_eyre::Result { + ) -> color_eyre::Result<(ExitNodeStatus, Secret)> { let password = generate_password(32); // create secret for password too - let _secret = exit_node.generate_secret(password.clone()).await?; + let secret = exit_node.generate_secret(password.clone()).await?; let config = generate_cloud_init_config(&password, exit_node.spec.port); @@ -138,7 +138,7 @@ impl Provisioner for DigitalOceanProvisioner { Some(&droplet_id), ); - Ok(exit_node) + Ok((exit_node, secret)) } async fn update_exit_node( @@ -170,7 +170,8 @@ impl Provisioner for DigitalOceanProvisioner { } else { warn!("No status found for exit node, creating new droplet"); // TODO: this should be handled by the controller logic - return self.create_exit_node(auth, exit_node).await; + let (status, _) = self.create_exit_node(auth, exit_node).await?; + return Ok(status); } } diff --git a/src/cloud/linode.rs b/src/cloud/linode.rs index 415d6df..627c3fe 100644 --- a/src/cloud/linode.rs +++ b/src/cloud/linode.rs @@ -54,10 +54,11 @@ impl Provisioner for LinodeProvisioner { &self, auth: Secret, exit_node: ExitNode, - ) -> color_eyre::Result { + ) -> color_eyre::Result<(ExitNodeStatus, Secret)> { let password = generate_password(32); - let _secret = exit_node.generate_secret(password.clone()).await?; + // Password for the server + let secret = exit_node.generate_secret(password.clone()).await?; let config = generate_cloud_init_config(&password, exit_node.spec.port); @@ -126,7 +127,7 @@ impl Provisioner for LinodeProvisioner { Some(&instance.id.to_string()), ); - Ok(status) + Ok((status, secret)) } async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> { @@ -178,7 +179,10 @@ impl Provisioner for LinodeProvisioner { Ok(status) } else { warn!("No instance status found, creating new instance"); - return self.create_exit_node(auth.clone(), exit_node).await; + return self + 
.create_exit_node(auth.clone(), exit_node) + .await + .map(|(status, _)| status); } } } diff --git a/src/cloud/mod.rs b/src/cloud/mod.rs index 36ab3e8..f6d5210 100644 --- a/src/cloud/mod.rs +++ b/src/cloud/mod.rs @@ -27,7 +27,8 @@ pub trait Provisioner { &self, auth: Secret, exit_node: ExitNode, - ) -> color_eyre::Result; + // Should return the pointer to the password secret for the exit node + ) -> color_eyre::Result<(ExitNodeStatus, Secret)>; async fn update_exit_node( &self, auth: Secret, diff --git a/src/daemon.rs b/src/daemon.rs index db211e0..a811a3e 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -16,10 +16,6 @@ There can also be a case where the user creates an exit node manually, with the provisioner annotation set, in that case chisel operator will create a cloud resource for that exit node and manages it. - - todo: properly handle all this logic - - todo: use `tracing` and put every operation in a span to make debugging easier */ use color_eyre::Result; @@ -46,6 +42,8 @@ use std::{collections::BTreeMap, sync::Arc}; use std::time::Duration; use tracing::{debug, error, info, instrument, warn}; +use std::env; + use crate::{ cloud::Provisioner, ops::{ @@ -58,6 +56,8 @@ use crate::{deployment::create_owned_deployment, error::ReconcileError}; pub const EXIT_NODE_FINALIZER: &str = "exitnode.chisel-operator.io/finalizer"; pub const SVCS_FINALIZER: &str = "service.chisel-operator.io/finalizer"; +// todo: Refactor everything in here into separate functions, then we can write unit tests for them + // pub fn get_trace_id() -> opentelemetry::trace::TraceId { // // opentelemetry::Context -> opentelemetry::trace::Span // use opentelemetry::trace::TraceContextExt as _; @@ -208,7 +208,7 @@ async fn select_exit_node_local( .unwrap_or(false); // Is the ExitNode not cloud provisioned or is the status set? 
- !is_cloud_provisioned || node.status.is_some() + (!is_cloud_provisioned || node.status.is_some()) && !node.is_assigned() }) .collect::>() .first() @@ -303,6 +303,13 @@ async fn exit_node_for_service( async fn reconcile_svcs(obj: Arc, ctx: Arc) -> Result { // Return if service is not LoadBalancer or if the loadBalancerClass is not blank or set to $OPERATOR_CLASS + // Check if the REQUIRE_OPERATOR_CLASS environment variable is set + let limit_load_balancer_class; + match env::var("REQUIRE_OPERATOR_CLASS") { + Ok(v) => limit_load_balancer_class = v, + Err(_e) => limit_load_balancer_class = "false".to_string(), + } + // todo: is there anything different need to be done for OpenShift? We use vanilla k8s and k3s/rke2 so we don't know if obj .spec @@ -313,7 +320,7 @@ async fn reconcile_svcs(obj: Arc, ctx: Arc) -> Result, ctx: Arc) -> Result, ctx: Arc) -> Result, ctx: Arc) -> Result, ctx: Arc) -> Result { let _node = { + let mut pass_secret: Option = None; + // if status exists, update, else create let cloud_resource = if let Some(_status) = node.status.as_ref() { info!("Updating cloud resource for {}", node.name_any()); provisioner_api .update_exit_node(secret.clone(), (*node).clone()) .await } else { + // todo: probably update the Provisioner trait to accept a provisioner API handle or + // the provisioner API token *and* then a password secret + // Right now we have the create_exit_node method which returns the password secret alongside the status + + // create cloud resource info!("Creating cloud resource for {}", node.name_any()); - provisioner_api + + let (resource, new_pass_secret) = provisioner_api .create_exit_node(secret.clone(), (*node).clone()) - .await + .await?; + pass_secret = Some(new_pass_secret); + Ok(resource) }; // TODO: Don't replace the entire status and object, sadly JSON is better here - let exitnode_patch = serde_json::json!({ - "status": cloud_resource? 
- }); + let exitnode_patch = if let Some(p_secret) = pass_secret { + serde_json::json!({ + "status": cloud_resource?, + "spec": { + "auth": p_secret.name_any(), + } + }) + } else { + serde_json::json!({ + "status": cloud_resource?, + }) + }; exit_nodes .patch_status( diff --git a/src/ops.rs b/src/ops.rs index 05f2bb8..4ae6c99 100644 --- a/src/ops.rs +++ b/src/ops.rs @@ -88,6 +88,7 @@ impl ExitNode { /// /// Generates a new secret with the `auth` key containing the auth string for chisel in the same namespace as the ExitNode pub async fn generate_secret(&self, password: String) -> Result { + debug!("Generating secret for ExitNode"); let secret_name = self.get_secret_name(); let auth_tmpl = format!("{}:{}", crate::cloud::pwgen::DEFAULT_USERNAME, password); @@ -127,7 +128,27 @@ impl ExitNode { Ok(secret) } + + /// Checks if the exit node is already assigned to a service + pub fn is_assigned(&self) -> bool { + self.metadata + .annotations + .as_ref() + .map(|annotations| annotations.contains_key(EXIT_NODE_NAME_LABEL)) + .unwrap_or(false) + } + + /// Gets the IP address of the exit node + pub fn get_ip(&self) -> Option { + self.status.as_ref().map(|status| status.ip.clone()) + } + + /// Gets the name of the exit node + pub fn get_name(&self) -> Option { + self.metadata.name.clone() + } } + #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] pub struct ExitNodeStatus { pub provider: String,