From b818521d37ef0497c54a2cc46ff278628e35f44d Mon Sep 17 00:00:00 2001
From: Cappy Ishihara
Date: Sat, 19 Oct 2024 18:20:18 +0700
Subject: [PATCH 1/7] Try to fix an issue where cloud-provisioned ExitNodes don't get their secret refs set

---
 src/cloud/aws.rs          |  8 +++++---
 src/cloud/digitalocean.rs |  9 +++++----
 src/cloud/linode.rs       |  9 +++++----
 src/cloud/mod.rs          |  3 ++-
 src/daemon.rs             | 36 +++++++++++++++++++++++++++++++-----
 src/ops.rs                |  1 +
 6 files changed, 49 insertions(+), 17 deletions(-)

diff --git a/src/cloud/aws.rs b/src/cloud/aws.rs
index f723104..b3014ca 100644
--- a/src/cloud/aws.rs
+++ b/src/cloud/aws.rs
@@ -112,7 +112,7 @@ impl Provisioner for AWSProvisioner {
         &self,
         auth: Secret,
         exit_node: ExitNode,
-    ) -> color_eyre::Result<ExitNodeStatus> {
+    ) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
         let provisioner = exit_node
             .metadata
             .annotations
@@ -127,6 +127,8 @@
 
         let password = generate_password(32);
 
+        let secret = exit_node.generate_secret(password.clone()).await?;
+
         let cloud_init_config = generate_cloud_init_config(&password, CHISEL_PORT);
         let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config);
 
@@ -222,7 +224,7 @@ impl Provisioner for AWSProvisioner {
             instance.instance_id.map(|id| id.to_string()).as_deref(),
         );
 
-        Ok(exit_node)
+        Ok((exit_node, secret))
     }
 
     async fn update_exit_node(
@@ -268,7 +270,7 @@ impl Provisioner for AWSProvisioner {
         } else {
             warn!("No status found for exit node, creating new instance");
             // TODO: this should be handled by the controller logic
-            return self.create_exit_node(auth, exit_node).await;
+            return self.create_exit_node(auth, exit_node).await.map(|(status, _)| status);
         }
     }
 
diff --git a/src/cloud/digitalocean.rs b/src/cloud/digitalocean.rs
index eeea31c..d58d987 100644
--- a/src/cloud/digitalocean.rs
+++ b/src/cloud/digitalocean.rs
@@ -61,12 +61,12 @@ impl Provisioner for DigitalOceanProvisioner {
         &self,
         auth: Secret,
         exit_node: ExitNode,
-    ) -> color_eyre::Result<ExitNodeStatus> {
+    ) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
         let password = generate_password(32);
 
         // create secret for password too
-        let _secret = exit_node.generate_secret(password.clone()).await?;
+        let secret = exit_node.generate_secret(password.clone()).await?;
 
         let config = generate_cloud_init_config(&password, exit_node.spec.port);
 
@@ -138,7 +138,7 @@ impl Provisioner for DigitalOceanProvisioner {
             Some(&droplet_id),
         );
 
-        Ok(exit_node)
+        Ok((exit_node, secret))
     }
 
     async fn update_exit_node(
@@ -170,7 +170,8 @@ impl Provisioner for DigitalOceanProvisioner {
         } else {
             warn!("No status found for exit node, creating new droplet");
             // TODO: this should be handled by the controller logic
-            return self.create_exit_node(auth, exit_node).await;
+            let (status, _) = self.create_exit_node(auth, exit_node).await?;
+            return Ok(status);
         }
     }
 
diff --git a/src/cloud/linode.rs b/src/cloud/linode.rs
index 415d6df..594815e 100644
--- a/src/cloud/linode.rs
+++ b/src/cloud/linode.rs
@@ -54,10 +54,11 @@ impl Provisioner for LinodeProvisioner {
         &self,
         auth: Secret,
         exit_node: ExitNode,
-    ) -> color_eyre::Result<ExitNodeStatus> {
+    ) -> color_eyre::Result<(ExitNodeStatus, Secret)> {
         let password = generate_password(32);
 
-        let _secret = exit_node.generate_secret(password.clone()).await?;
+        // Password for the server
+        let secret = exit_node.generate_secret(password.clone()).await?;
 
         let config = generate_cloud_init_config(&password, exit_node.spec.port);
 
@@ -126,7 +127,7 @@ impl Provisioner for LinodeProvisioner {
             Some(&instance.id.to_string()),
         );
 
-        Ok(status)
+        Ok((status, secret))
     }
 
     async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> {
@@ -178,7 +179,7 @@ impl Provisioner for LinodeProvisioner {
             Ok(status)
         } else {
             warn!("No instance status found, creating new instance");
-            return self.create_exit_node(auth.clone(), exit_node).await;
+            return self.create_exit_node(auth.clone(), exit_node).await.map(|(status, _)| status);
         }
     }
 }
diff --git a/src/cloud/mod.rs b/src/cloud/mod.rs
index 36ab3e8..f6d5210 100644
--- a/src/cloud/mod.rs
+++ b/src/cloud/mod.rs
@@ -27,7 +27,8 @@ pub trait Provisioner {
         &self,
         auth: Secret,
         exit_node: ExitNode,
-    ) -> color_eyre::Result<ExitNodeStatus>;
+    // Should return the pointer to the password secret for the exit node
+    ) -> color_eyre::Result<(ExitNodeStatus, Secret)>;
     async fn update_exit_node(
         &self,
         auth: Secret,
diff --git a/src/daemon.rs b/src/daemon.rs
index db211e0..9797dc9 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -58,6 +58,8 @@ use crate::{deployment::create_owned_deployment, error::ReconcileError};
 pub const EXIT_NODE_FINALIZER: &str = "exitnode.chisel-operator.io/finalizer";
 pub const SVCS_FINALIZER: &str = "service.chisel-operator.io/finalizer";
 
+// todo: Refactor everything in here into separate functions, then we can write unit tests for them
+
 // pub fn get_trace_id() -> opentelemetry::trace::TraceId {
 //     // opentelemetry::Context -> opentelemetry::trace::Span
 //     use opentelemetry::trace::TraceContextExt as _;
@@ -582,6 +584,7 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action, ReconcileError>
         let _node = {
+
+            let mut pass_secret: Option<Secret> = None;
+
             // if status exists, update, else create
             let cloud_resource = if let Some(_status) = node.status.as_ref() {
                 info!("Updating cloud resource for {}", node.name_any());
                 provisioner_api
                     .update_exit_node(secret.clone(), (*node).clone())
                     .await
             } else {
+                // todo: probably update the Provisioner trait to accept a provisioner API handle or
+                // the provisioner API token *and* then a password secret
+                // Right now we have the create_exit_node method which returns the password secret alongside the status
+
+                // create cloud resource
                 info!("Creating cloud resource for {}", node.name_any());
-                provisioner_api
+
+                let (resource, new_pass_secret) = provisioner_api
                     .create_exit_node(secret.clone(), (*node).clone())
-                    .await
+                    .await?;
+                pass_secret = Some(new_pass_secret);
+                Ok(resource)
             };
             // TODO: Don't replace the entire status and object, sadly JSON is better here
-            let exitnode_patch = serde_json::json!({
-                "status": cloud_resource?
-            });
+            let exitnode_patch = if let Some(p_secret) = pass_secret {
+                serde_json::json!({
+                    "status": cloud_resource?,
+                    "spec": {
+                        "auth": p_secret.name_any(),
+                    }
+                })
+            } else {
+                serde_json::json!({
+                    "status": cloud_resource?,
+                })
+            };
 
             exit_nodes
                 .patch_status(
diff --git a/src/ops.rs b/src/ops.rs
index 05f2bb8..ea7ded5 100644
--- a/src/ops.rs
+++ b/src/ops.rs
@@ -88,6 +88,7 @@ impl ExitNode {
     ///
     /// Generates a new secret with the `auth` key containing the auth string for chisel in the same namespace as the ExitNode
     pub async fn generate_secret(&self, password: String) -> Result<Secret> {
+        debug!("Generating secret for ExitNode");
        let secret_name = self.get_secret_name();

        let auth_tmpl = format!("{}:{}", crate::cloud::pwgen::DEFAULT_USERNAME, password);
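Taken together, the signature change and the conditional `spec` patch amount to the following shape. This is a minimal sketch rather than the operator's literal code: `ExitNodeStatus` is trimmed to a few representative fields, and the password secret is passed by name instead of as a full `Secret` object.

```rust
use serde::Serialize;
use serde_json::json;

// Trimmed stand-in for the operator's CRD status type (the real one lives
// in src/ops.rs and carries more fields).
#[derive(Serialize)]
struct ExitNodeStatus {
    provider: String,
    name: String,
    ip: String,
}

// Reconciler side of the fix: only a freshly created exit node comes with a
// new password secret, so `spec.auth` is patched conditionally.
fn build_exit_node_patch(
    status: &ExitNodeStatus,
    pass_secret_name: Option<&str>,
) -> serde_json::Value {
    match pass_secret_name {
        Some(secret_name) => json!({
            "status": status,
            "spec": { "auth": secret_name },
        }),
        None => json!({ "status": status }),
    }
}

fn main() {
    let status = ExitNodeStatus {
        provider: "linode".into(),
        name: "my-exit-node".into(),
        ip: "203.0.113.7".into(),
    };
    // Create path: the password secret's name lands in spec.auth.
    println!("{}", build_exit_node_patch(&status, Some("my-exit-node")));
    // Update path: no new secret, so only status is replaced.
    println!("{}", build_exit_node_patch(&status, None));
}
```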
From ed0a76e95eb1b4a369590edb08422e73023898f1 Mon Sep 17 00:00:00 2001
From: lea
Date: Thu, 24 Oct 2024 21:51:39 -0700
Subject: [PATCH 2/7] feat: add funding.json well-known

---
 site/public/.well-known/funding-manifest-urls | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 site/public/.well-known/funding-manifest-urls

diff --git a/site/public/.well-known/funding-manifest-urls b/site/public/.well-known/funding-manifest-urls
new file mode 100644
index 0000000..0125972
--- /dev/null
+++ b/site/public/.well-known/funding-manifest-urls
@@ -0,0 +1 @@
+https://fyralabs.com/funding.json
\ No newline at end of file

From e47dfeb14bde3cb79fad07a6c61e320b32a20510 Mon Sep 17 00:00:00 2001
From: lea
Date: Thu, 24 Oct 2024 22:10:41 -0700
Subject: [PATCH 3/7] feat: add funding.json well-known to repo

---
 .well-known/funding-manifest-urls | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .well-known/funding-manifest-urls

diff --git a/.well-known/funding-manifest-urls b/.well-known/funding-manifest-urls
new file mode 100644
index 0000000..0125972
--- /dev/null
+++ b/.well-known/funding-manifest-urls
@@ -0,0 +1 @@
+https://fyralabs.com/funding.json
\ No newline at end of file
From dcdb88fc95cdfc22a6b46c98621d794576895998 Mon Sep 17 00:00:00 2001
From: Cappy Ishihara
Date: Thu, 14 Nov 2024 18:04:07 +0700
Subject: [PATCH 4/7] Add assignment checks to ExitNode

---
 src/daemon.rs |  7 +------
 src/ops.rs    | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/src/daemon.rs b/src/daemon.rs
index 9797dc9..a8efaef 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -16,10 +16,6 @@
     There can also be a case where the user creates an exit node manually,
     with the provisioner annotation set, in that case chisel operator will
     create a cloud resource for that exit node and manages it.
-
-    todo: properly handle all this logic
-
-    todo: use `tracing` and put every operation in a span to make debugging easier
 */
 
 use color_eyre::Result;
@@ -210,7 +206,7 @@ async fn select_exit_node_local(
                 .unwrap_or(false);
 
             // Is the ExitNode not cloud provisioned or is the status set?
-            !is_cloud_provisioned || node.status.is_some()
+            (!is_cloud_provisioned || node.status.is_some()) && !node.is_assigned()
         })
         .collect::<Vec<ExitNode>>()
         .first()
@@ -613,7 +609,6 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action, ReconcileError>
         let _node = {
-
             let mut pass_secret: Option<Secret> = None;
 
             // if status exists, update, else create
diff --git a/src/ops.rs b/src/ops.rs
index ea7ded5..4ae6c99 100644
--- a/src/ops.rs
+++ b/src/ops.rs
@@ -128,7 +128,27 @@ impl ExitNode {
 
         Ok(secret)
     }
+
+    /// Checks if the exit node is already assigned to a service
+    pub fn is_assigned(&self) -> bool {
+        self.metadata
+            .annotations
+            .as_ref()
+            .map(|annotations| annotations.contains_key(EXIT_NODE_NAME_LABEL))
+            .unwrap_or(false)
+    }
+
+    /// Gets the IP address of the exit node
+    pub fn get_ip(&self) -> Option<String> {
+        self.status.as_ref().map(|status| status.ip.clone())
+    }
+
+    /// Gets the name of the exit node
+    pub fn get_name(&self) -> Option<String> {
+        self.metadata.name.clone()
+    }
 }
+
 #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
 pub struct ExitNodeStatus {
     pub provider: String,
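The filter change above is the heart of the patch: an ExitNode is eligible only if it is either unmanaged or fully provisioned, and no Service has claimed it yet. A self-contained sketch of that rule, with hand-rolled stand-ins for the CRD types — the annotation key's value here is a placeholder, since only the `EXIT_NODE_NAME_LABEL` constant name appears in the diff:

```rust
use std::collections::BTreeMap;

// Placeholder value; the real constant is defined elsewhere in the operator.
const EXIT_NODE_NAME_LABEL: &str = "chisel-operator.io/exit-node-name";

// Trimmed stand-in for the CRD, reduced to the three facts the filter needs.
struct ExitNode {
    annotations: BTreeMap<String, String>,
    is_cloud_provisioned: bool,
    has_status: bool,
}

impl ExitNode {
    // Mirrors the new is_assigned(): an ExitNode is "taken" once a Service
    // has stamped its name into the annotation.
    fn is_assigned(&self) -> bool {
        self.annotations.contains_key(EXIT_NODE_NAME_LABEL)
    }

    // The updated filter from select_exit_node_local: pick nodes that are
    // either unmanaged or fully provisioned, and not yet claimed.
    fn selectable(&self) -> bool {
        (!self.is_cloud_provisioned || self.has_status) && !self.is_assigned()
    }
}

fn main() {
    let free = ExitNode {
        annotations: BTreeMap::new(),
        is_cloud_provisioned: false,
        has_status: false,
    };
    assert!(free.selectable());

    let mut taken = ExitNode {
        annotations: BTreeMap::new(),
        is_cloud_provisioned: true,
        has_status: true,
    };
    taken
        .annotations
        .insert(EXIT_NODE_NAME_LABEL.into(), "some-service".into());
    assert!(!taken.selectable());
}
```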
From 1f590354c6481983a8edfa03e8565bcfe205dc8d Mon Sep 17 00:00:00 2001
From: Cappy Ishihara
Date: Thu, 14 Nov 2024 18:09:02 +0700
Subject: [PATCH 5/7] cargo fmt

---
 src/cloud/aws.rs    | 5 ++++-
 src/cloud/linode.rs | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/cloud/aws.rs b/src/cloud/aws.rs
index b3014ca..fa50c8d 100644
--- a/src/cloud/aws.rs
+++ b/src/cloud/aws.rs
@@ -270,7 +270,10 @@ impl Provisioner for AWSProvisioner {
         } else {
             warn!("No status found for exit node, creating new instance");
             // TODO: this should be handled by the controller logic
-            return self.create_exit_node(auth, exit_node).await.map(|(status, _)| status);
+            return self
+                .create_exit_node(auth, exit_node)
+                .await
+                .map(|(status, _)| status);
         }
     }
 
diff --git a/src/cloud/linode.rs b/src/cloud/linode.rs
index 594815e..627c3fe 100644
--- a/src/cloud/linode.rs
+++ b/src/cloud/linode.rs
@@ -179,7 +179,10 @@ impl Provisioner for LinodeProvisioner {
             Ok(status)
         } else {
             warn!("No instance status found, creating new instance");
-            return self.create_exit_node(auth.clone(), exit_node).await.map(|(status, _)| status);
+            return self
+                .create_exit_node(auth.clone(), exit_node)
+                .await
+                .map(|(status, _)| status);
         }
     }
 }

From 609334ce0d1598f78f6dd2441740f49d05646160 Mon Sep 17 00:00:00 2001
From: Cappy Ishihara
Date: Mon, 9 Dec 2024 11:06:13 +0700
Subject: [PATCH 6/7] remove duplicate key

---
 charts/chisel-operator/templates/deployment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/charts/chisel-operator/templates/deployment.yaml b/charts/chisel-operator/templates/deployment.yaml
index af36049..8f30f6f 100644
--- a/charts/chisel-operator/templates/deployment.yaml
+++ b/charts/chisel-operator/templates/deployment.yaml
@@ -26,7 +26,6 @@ spec:
       imagePullSecrets:
         {{- toYaml . | nindent 8 }}
       {{- end }}
-      serviceAccountName: {{ include "chisel-operator.serviceAccountName" . }}
       containers:
         - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -47,3 +46,4 @@ spec:
         {{- toYaml . | nindent 8 }}
       {{- end }}
+
\ No newline at end of file
From 0fe7a538f374d063b99f36cb18e0ba24bedc8901 Mon Sep 17 00:00:00 2001
From: Gary
Date: Sun, 5 Jan 2025 12:54:57 +0000
Subject: [PATCH 7/7] Added ability to limit the LoadBalancers picked up by chisel-operator

---
 charts/chisel-operator/templates/deployment.yaml |  3 +++
 charts/chisel-operator/values.yaml               |  5 +++++
 src/daemon.rs                                    | 11 ++++++++++-
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/charts/chisel-operator/templates/deployment.yaml b/charts/chisel-operator/templates/deployment.yaml
index 8f30f6f..5dd5cc0 100644
--- a/charts/chisel-operator/templates/deployment.yaml
+++ b/charts/chisel-operator/templates/deployment.yaml
@@ -32,6 +32,9 @@ spec:
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
+          env:
+            - name: "REQUIRE_OPERATOR_CLASS"
+              value: "{{.Values.global.requireOperatorClass}}"
 
       {{- with .Values.nodeSelector }}
       nodeSelector:
diff --git a/charts/chisel-operator/values.yaml b/charts/chisel-operator/values.yaml
index 0bab700..321ffed 100644
--- a/charts/chisel-operator/values.yaml
+++ b/charts/chisel-operator/values.yaml
@@ -7,6 +7,11 @@ replicaCount: 1 # Right now only 1 replica is supported
 # For now, we recommend running only 1 replica else Chisel Operator may constantly
 # recreate resources, wasting your API resources and costing you money.
 
+# Add the ability to limit to just the chisel operator LoadBalancerClass
+# Set requireOperatorClass: true to limit the LoadBalancers picked up by chisel-operator
+global:
+  requireOperatorClass: false
+
 image:
   repository: ghcr.io/fyralabs/chisel-operator
   pullPolicy: IfNotPresent
diff --git a/src/daemon.rs b/src/daemon.rs
index a8efaef..a811a3e 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -42,6 +42,8 @@ use std::{collections::BTreeMap, sync::Arc};
 use std::time::Duration;
 use tracing::{debug, error, info, instrument, warn};
 
+use std::env;
+
 use crate::{
     cloud::Provisioner,
     ops::{
@@ -301,6 +303,13 @@ async fn reconcile_svcs(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action, ReconcileError>
     // Return if service is not LoadBalancer or if the loadBalancerClass is not blank or set to $OPERATOR_CLASS
 
+    // Check if the REQUIRE_OPERATOR_CLASS environment variable is set
+    let limit_load_balancer_class;
+    match env::var("REQUIRE_OPERATOR_CLASS") {
+        Ok(v) => limit_load_balancer_class = v,
+        Err(_e) => limit_load_balancer_class = "false".to_string(),
+    }
+
     // todo: does anything need to be done differently for OpenShift? We use vanilla k8s and k3s/rke2, so we don't know
     if obj
         .spec
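The comment at the top of that hunk states the rule: reconcile only LoadBalancer Services whose loadBalancerClass is blank or set to the operator's own class, with the blank case now gated behind REQUIRE_OPERATOR_CLASS. A rough, self-contained sketch of that gate — the `OPERATOR_CLASS` value here is a placeholder, and plain function arguments stand in for the real `Service` object:

```rust
use std::env;

// Placeholder value; the operator defines its own LoadBalancerClass name.
const OPERATOR_CLASS: &str = "chisel-operator.io/chisel-operator-class";

/// Decide whether a Service should be reconciled, given its
/// `spec.type` and `spec.loadBalancerClass`.
fn should_reconcile(svc_type: Option<&str>, lb_class: Option<&str>) -> bool {
    // Mirrors the env lookup above: strict mode is off unless the
    // REQUIRE_OPERATOR_CLASS env var is set to "true".
    let require_class =
        env::var("REQUIRE_OPERATOR_CLASS").unwrap_or_else(|_| "false".to_string());

    // Only LoadBalancer services are ever considered.
    if svc_type != Some("LoadBalancer") {
        return false;
    }

    match lb_class {
        // A class is set: it must be ours.
        Some(class) => class == OPERATOR_CLASS,
        // No class set: accept only when strict mode is disabled.
        None => require_class != "true",
    }
}

fn main() {
    // With REQUIRE_OPERATOR_CLASS unset, classless LoadBalancers are picked up.
    assert!(should_reconcile(Some("LoadBalancer"), None));
    assert!(should_reconcile(Some("LoadBalancer"), Some(OPERATOR_CLASS)));
    assert!(!should_reconcile(Some("ClusterIP"), None));
}
```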