refactor, fix: Properly generate the auth keys for the exit node
Exit nodes that are cloud-managed will no longer work without a secret

Co-authored-by: madomado <[email protected]>
korewaChino and madonuko committed Jan 8, 2025
1 parent a89be23 commit dec7df7
Showing 7 changed files with 70 additions and 40 deletions.
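
In rough outline, the change works like this: the controller now generates the node password once, stores it in a Kubernetes Secret, records that Secret's name in the ExitNode's spec.auth, and later reads it back and hands it to the cloud provisioner, which bakes it into cloud-init. The condensed sketch below is not code from this commit — it collapses work the real operator spreads across two reconcilers, and only the helper names (generate_password, generate_secret, spec.auth, create_exit_node) follow the diff:

// Sketch only: condenses the flow this commit introduces. In the operator the
// Secret is created by the Service reconciler and read back by the ExitNode
// reconciler; both halves are shown in one function here for clarity.
async fn provision_managed_exit_node(
    provisioner_api: impl Provisioner,  // provider selected for this node
    api_key_secret: Secret,             // cloud API credentials, not the node password
    mut exit_node: ExitNode,
) -> color_eyre::Result<ExitNodeStatus> {
    // 1. Generate the password once, up front.
    let password = generate_password(32);

    // 2. Persist it as a Secret and reference that Secret from the spec.
    let secret = exit_node.generate_secret(password.clone()).await?;
    exit_node.spec.auth = Some(secret.metadata.name.unwrap());

    // 3. Hand the same password to the provisioner, which writes it into the
    //    instance's cloud-init config.
    provisioner_api
        .create_exit_node(api_key_secret, exit_node, password)
        .await
}
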
8 changes: 4 additions & 4 deletions src/cloud/aws.rs
@@ -112,6 +112,7 @@ impl Provisioner for AWSProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
let provisioner = exit_node
.metadata
@@ -125,9 +126,7 @@ impl Provisioner for AWSProvisioner {
)
})?;

let password = generate_password(32);

let cloud_init_config = generate_cloud_init_config(&password, CHISEL_PORT);
let cloud_init_config = generate_cloud_init_config(&node_password, CHISEL_PORT);
let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config);

let aws_api: aws_config::SdkConfig = AWSIdentity::from_secret(&auth, self.region.clone())?
@@ -229,6 +228,7 @@ impl Provisioner for AWSProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
let aws_api: aws_config::SdkConfig = AWSIdentity::from_secret(&auth, self.region.clone())?
.generate_aws_config()
@@ -268,7 +268,7 @@ impl Provisioner for AWSProvisioner {
} else {
warn!("No status found for exit node, creating new instance");
// TODO: this should be handled by the controller logic
return self.create_exit_node(auth, exit_node).await;
return self.create_exit_node(auth, exit_node, node_password).await;
}
}

6 changes: 3 additions & 3 deletions src/cloud/cloud_init.rs
@@ -1,4 +1,4 @@
pub fn generate_cloud_init_config(password: &str, port: u16) -> String {
pub fn generate_cloud_init_config(auth_string: &str, port: u16) -> String {
let cloud_config = serde_json::json!({
"runcmd": ["curl https://i.jpillora.com/chisel! | bash", "systemctl enable --now chisel"],
"write_files": [{
@@ -19,14 +19,14 @@ RestartSec=1
User=root
# You can add any additional flags here
# This example uses port 9090 for the tunnel socket. `--reverse` is required for our use case.
ExecStart=/usr/local/bin/chisel server --port={port} --reverse --auth chisel:{password}
ExecStart=/usr/local/bin/chisel server --port={port} --reverse --auth {auth_string}
# Additional .env file for auth and secrets
EnvironmentFile=-/etc/sysconfig/chisel
PassEnvironment=AUTH
"#)
}, {
"path": "/etc/sysconfig/chisel",
"content": format!("AUTH=chisel:{}\n", password)
"content": format!("AUTH={auth_string}\n")
}]
});

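
Note the contract change here: generate_cloud_init_config no longer hard-codes the chisel: username; the caller passes a complete auth string that is interpolated verbatim into chisel's --auth flag and the AUTH environment file. A usage sketch, mirroring how the providers below call it (that node_password already carries the user:password form chisel expects is an assumption, not something this diff spells out):

// Sketch: calling the updated helper. node_password is expected to already be a
// full chisel auth string ("user:password"), since it is substituted as-is into
// `--auth {auth_string}` and `AUTH={auth_string}`.
let cloud_init_config = generate_cloud_init_config(&node_password, exit_node.spec.port);
// The providers then base64-encode the result before handing it over as user data:
let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config);
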
13 changes: 5 additions & 8 deletions src/cloud/digitalocean.rs
@@ -61,14 +61,10 @@ impl Provisioner for DigitalOceanProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
let password = generate_password(32);

// create secret for password too

let _secret = exit_node.generate_secret(password.clone()).await?;

let config = generate_cloud_init_config(&password, exit_node.spec.port);
let config = generate_cloud_init_config(&node_password, exit_node.spec.port);

// TODO: Secret reference, not plaintext
let api: DigitalOceanApi = DigitalOceanApi::new(self.get_token(auth).await?);
@@ -137,7 +133,7 @@ impl Provisioner for DigitalOceanProvisioner {
droplet_ip.clone(),
Some(droplet_id),
);

debug!(?exit_node, "Created exit node!!");

Ok(exit_node)
@@ -147,6 +143,7 @@ impl Provisioner for DigitalOceanProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
// check if droplet exists, then update it
let api: DigitalOceanApi = DigitalOceanApi::new(self.get_token(auth.clone()).await?);
@@ -172,7 +169,7 @@ impl Provisioner for DigitalOceanProvisioner {
} else {
warn!("No status found for exit node, creating new droplet");
// TODO: this should be handled by the controller logic
return self.create_exit_node(auth, exit_node).await;
return self.create_exit_node(auth, exit_node, node_password).await;
}
}

12 changes: 5 additions & 7 deletions src/cloud/linode.rs
@@ -54,12 +54,9 @@ impl Provisioner for LinodeProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
let password = generate_password(32);

let _secret = exit_node.generate_secret(password.clone()).await?;

let config = generate_cloud_init_config(&password, exit_node.spec.port);
let config = generate_cloud_init_config(&node_password, exit_node.spec.port);

// Okay, so apparently Linode uses base64 for user_data, so let's
// base64 encode the config
@@ -87,7 +84,7 @@ impl Provisioner for LinodeProvisioner {

let mut instance = api
.create_instance(&self.region, &self.size)
.root_pass(&password)
.root_pass(&node_password)
.label(&name)
.user_data(&user_data)
.tags(vec![format!("chisel-operator-provisioner:{}", provisioner)])
@@ -152,6 +149,7 @@ impl Provisioner for LinodeProvisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus> {
let api = LinodeApi::new(self.get_token(&auth).await?);

@@ -178,7 +176,7 @@ impl Provisioner for LinodeProvisioner {
Ok(status)
} else {
warn!("No instance status found, creating new instance");
return self.create_exit_node(auth.clone(), exit_node).await;
return self.create_exit_node(auth.clone(), exit_node, node_password).await;
}
}
}
2 changes: 2 additions & 0 deletions src/cloud/mod.rs
@@ -27,11 +27,13 @@ pub trait Provisioner {
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus>;
async fn update_exit_node(
&self,
auth: Secret,
exit_node: ExitNode,
node_password: String,
) -> color_eyre::Result<ExitNodeStatus>;
async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()>;
}
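
For implementors, the practical effect of the trait change is that the password now arrives as an argument instead of being generated inside each provider. A minimal sketch of an implementation under the new signatures — ExampleProvisioner and its launch_instance/refresh_status/teardown_instance helpers are hypothetical, not part of this commit:

// Sketch: a hypothetical provider under the updated trait; only the parts that
// touch node_password matter here, the helper calls are stand-ins.
#[async_trait::async_trait] // assumed; adjust to however the trait is actually declared
impl Provisioner for ExampleProvisioner {
    async fn create_exit_node(
        &self,
        auth: Secret,
        exit_node: ExitNode,
        node_password: String,
    ) -> color_eyre::Result<ExitNodeStatus> {
        // No generate_password() call any more: the controller owns the password
        // and has already stored it in the node's auth Secret.
        let config = generate_cloud_init_config(&node_password, exit_node.spec.port);
        self.launch_instance(&auth, &exit_node, &config).await
    }

    async fn update_exit_node(
        &self,
        auth: Secret,
        exit_node: ExitNode,
        node_password: String,
    ) -> color_eyre::Result<ExitNodeStatus> {
        match exit_node.status.clone() {
            Some(status) => self.refresh_status(&auth, &status).await,
            // Same fallback the real providers use: create the node, forwarding
            // the password along.
            None => self.create_exit_node(auth, exit_node, node_password).await,
        }
    }

    async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> {
        self.teardown_instance(&auth, &exit_node).await
    }
}
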
60 changes: 42 additions & 18 deletions src/daemon.rs
@@ -26,7 +26,7 @@ use color_eyre::Result;
use futures::{FutureExt, StreamExt};
use k8s_openapi::api::{
apps::v1::Deployment,
core::v1::{LoadBalancerIngress, LoadBalancerStatus, Service, ServiceStatus},
core::v1::{LoadBalancerIngress, LoadBalancerStatus, Secret, Service, ServiceStatus},
};
use kube::{
api::{Api, ListParams, Patch, PatchParams, ResourceExt},
@@ -47,7 +47,7 @@ use std::time::Duration;
use tracing::{debug, error, info, instrument, trace, warn};

use crate::{
cloud::Provisioner,
cloud::{pwgen::generate_password, Provisioner},
ops::{
parse_provisioner_label_value, ExitNode, ExitNodeProvisioner, ExitNodeSpec, ExitNodeStatus,
EXIT_NODE_NAME_LABEL, EXIT_NODE_PROVISIONER_LABEL,
@@ -218,7 +218,7 @@ async fn select_exit_node_local(
}

#[instrument(skip(ctx))]
/// Returns the ExitNode resource for a Service resource, either finding an existing one or creating a new one
/// Generates or returns an ExitNode resource for a Service resource, either finding an existing one or creating a new one
async fn exit_node_for_service(
ctx: Arc<Context>,
service: &Service,
@@ -258,7 +258,7 @@ async fn exit_node_for_service(
return Ok(exit_node);
}

let exit_node_tmpl = ExitNode {
let mut exit_node_tmpl = ExitNode {
metadata: ObjectMeta {
name: Some(exit_node_name.clone()),
namespace: service.namespace(),
@@ -285,6 +285,11 @@ async fn exit_node_for_service(
status: None,
};

let password = generate_password(32);
let secret = exit_node_tmpl.generate_secret(password.clone()).await?;

exit_node_tmpl.spec.auth = Some(secret.metadata.name.unwrap());

let serverside = PatchParams::apply(OPERATOR_MANAGER).validation_strict();

let exit_node = nodes
@@ -332,6 +337,7 @@ async fn reconcile_svcs(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action,
let obj = svc.clone();

let node_list = nodes.list(&ListParams::default().timeout(30)).await?;

// Find service binding of svc name/namespace?
let existing_node = node_list.iter().find(|node| {
node.metadata
Expand All @@ -341,6 +347,7 @@ async fn reconcile_svcs(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action,
.unwrap_or(false)
});

// XXX: Exit node manifest generation starts here
let node = {
if let Some(node) = existing_node {
node.clone()
@@ -523,13 +530,28 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action

return Ok(Action::await_change());
} else if is_managed {
// XXX: This unwrap assumes the provisioner annotation is always present on managed nodes.
let provisioner = obj
.metadata
.annotations
.as_ref()
.and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL))
.unwrap();

// We should assume that every managed exit node comes with an `auth` key, which is a reference to a Secret
// that contains the password for the exit node.
// If it doesn't exist, then it's probably bugged, and we should return an error
let node_password = {
let Some(ref node_password_secret_name) = obj.clone().spec.auth else {
return Err(ReconcileError::ManagedExitNodeNoPasswordSet);
};
let secrets_api = Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap());
let secret: Secret = secrets_api.get(node_password_secret_name).await?;
let Some(node_password) = secret.data.as_ref().unwrap().get("auth") else {
return Err(ReconcileError::AuthFieldNotSet);
};
String::from_utf8_lossy(&node_password.0).to_string()
};

trace!(?provisioner, "Provisioner");
if let Some(status) = &obj.status {
// Check for mismatch between annotation's provisioner and status' provisioner
@@ -592,7 +614,8 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action

let provisioner_api = provisioner.clone().spec.get_inner();

let secret = provisioner
// API key secret, do not use for node password
let api_key_secret = provisioner
.find_secret()
.await
.map_err(|_| crate::error::ReconcileError::CloudProvisionerSecretNotFound)?
@@ -603,24 +626,26 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action
EXIT_NODE_FINALIZER,
obj.clone(),
|event| async move {
let m: std::prelude::v1::Result<Action, crate::error::ReconcileError> = match event
{
let m: Result<_, crate::error::ReconcileError> = match event {
Event::Apply(node) => {
let _node = {
let _ = {
// XXX: We should get the value of the Secret and pass it in as node_password
let cloud_resource = if let Some(_status) = node.status.as_ref() {
info!("Updating cloud resource for {}", node.name_any());
provisioner_api
.update_exit_node(secret.clone(), (*node).clone())
.await
.update_exit_node(api_key_secret.clone(), (*node).clone(), node_password)
.await?
} else {
info!("Creating cloud resource for {}", node.name_any());
provisioner_api
.create_exit_node(secret.clone(), (*node).clone())
.await
.create_exit_node(api_key_secret.clone(), (*node).clone(), node_password)
.await?
};

// unwrap should be safe here since in k8s a Secret always has a name
// TODO: Don't replace the entire status and object, sadly JSON is better here
let exitnode_patch = serde_json::json!({
"status": cloud_resource?
"status": cloud_resource,
});

exit_nodes
@@ -641,7 +666,7 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action
if is_managed {
info!("Deleting cloud resource for {}", node.name_any());
provisioner_api
.delete_exit_node(secret, (*node).clone())
.delete_exit_node(api_key_secret, (*node).clone())
.await
.unwrap_or_else(|e| {
error!(?e, "Error deleting exit node {}", node.name_any())
Expand All @@ -650,7 +675,6 @@ async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action
Ok(Action::requeue(Duration::from_secs(3600)))
}
};

m
},
)
Expand Down Expand Up @@ -707,7 +731,7 @@ pub async fn run() -> color_eyre::Result<()> {
client: client.clone(),
}),
)
.for_each(|_| futures::future::ready(()))
.for_each(|result_value| futures::future::ready(()))
.boxed(),
);

Expand All @@ -732,7 +756,7 @@ pub async fn run() -> color_eyre::Result<()> {
error_policy_exit_node,
Arc::new(Context { client }),
)
.for_each(|_| futures::future::ready(()))
.for_each(|result_value| futures::future::ready(()))
.boxed(),
);

9 changes: 9 additions & 0 deletions src/error.rs
@@ -16,6 +16,15 @@ pub enum ReconcileError {
#[error("The secret keys for the cloud provisioner were not found in the cluster")]
CloudProvisionerSecretNotFound,

#[error("The managed exit node spec does not have a password set")]
ManagedExitNodeNoPasswordSet,

#[error("The Secret could not be found in the resource's namespace")]
SecretNotFound,

#[error("The `auth` field is not set in the Secret intended for the password")]
AuthFieldNotSet,

#[error("The operator has encountered an unknown error, this is most likely a bug: {0}")]
UnknownError(#[from] color_eyre::Report),
}
