From 9320b9c40c73f492cb407d4aebd5bbecca953c9f Mon Sep 17 00:00:00 2001
From: Georgiana Dolocan
Date: Fri, 22 Nov 2024 16:53:02 +0200
Subject: [PATCH] Replace hub_type with dask_nodes now that there is no
 daskhub helm chart

---
 .../templates/common/cluster-entry.yaml    |   2 +-
 config/clusters/templates/gcp/cluster.yaml |   2 +-
 deployer/README.md                         |   2 +-
 .../generate/dedicated_cluster/gcp.py      |   6 +-
 .../generate/hub_asset/cluster_entry.py    |   1 -
 eksctl/Udder-Cranberry1-Various            |  10 --
 eksctl/ubc-eoas copy.jsonnet               | 136 ------------------
 .../gcp/projects/cluster.tfvars.template   |   2 +-
 8 files changed, 9 insertions(+), 152 deletions(-)
 delete mode 100644 eksctl/Udder-Cranberry1-Various
 delete mode 100644 eksctl/ubc-eoas copy.jsonnet

diff --git a/config/clusters/templates/common/cluster-entry.yaml b/config/clusters/templates/common/cluster-entry.yaml
index ca4642fb2d..13008d601d 100644
--- a/config/clusters/templates/common/cluster-entry.yaml
+++ b/config/clusters/templates/common/cluster-entry.yaml
@@ -2,7 +2,7 @@ hubs:
   - name: {{ hub_name }}
     display_name: {{ cluster_name }} {{ hub_name }}
     domain: {{ hub_name }}.{{ cluster_name }}.2i2c.cloud
-    helm_chart: {{ hub_type }}
+    helm_chart: "basehub"
     helm_chart_values_files:
       - common.values.yaml
       - {{ hub_name }}.values.yaml
diff --git a/config/clusters/templates/gcp/cluster.yaml b/config/clusters/templates/gcp/cluster.yaml
index 0f56cf3b65..470a647c9b 100644
--- a/config/clusters/templates/gcp/cluster.yaml
+++ b/config/clusters/templates/gcp/cluster.yaml
@@ -31,7 +31,7 @@ hubs: []
 #   # Tip: consider changing this to something more human friendly
 #   display_name: "{{ cluster_name }} - <hub_name>"
 #   domain: <hub_name>.{{ cluster_name }}.2i2c.cloud
-#   helm_chart: {{ hub_type }}
+#   helm_chart: basehub
 #   helm_chart_values_files:
 #     - common.values.yaml
 #     - <hub_name>.values.yaml
diff --git a/deployer/README.md b/deployer/README.md
index 141dabfa84..95b14fa0b6 100644
--- a/deployer/README.md
+++ b/deployer/README.md
@@ -260,7 +260,7 @@ for a GCP cluster.
 - `cluster_name` - the name of the cluster
 - `cluster_region`- the region where the cluster will be deployed
 - `project_id` - the project ID of the GCP project
-- `hub_type` (basehub/daskhub) - whether the hub deployed there would need dask or not
+- `dask_nodes` - whether the cluster will need dask nodes or not
 - `hub_name` - the name of the first hub which will be deployed in the cluster (usually `staging`)
 
 The templates have a set of default features and define some opinionated characteristics for the cluster.
diff --git a/deployer/commands/generate/dedicated_cluster/gcp.py b/deployer/commands/generate/dedicated_cluster/gcp.py
index c5622061a6..5327694858 100644
--- a/deployer/commands/generate/dedicated_cluster/gcp.py
+++ b/deployer/commands/generate/dedicated_cluster/gcp.py
@@ -63,6 +63,10 @@ def gcp(
     project_id: str = typer.Option(
         ..., prompt="Please insert the Project ID of the GCP project"
     ),
+    dask_nodes: bool = typer.Option(
+        False,
+        prompt='If this cluster needs dask nodes, please type "y", otherwise hit ENTER.',
+    ),
     force: bool = typer.Option(
         False,
         "--force",
@@ -79,7 +83,7 @@
         # Also store the provider, as it's useful for some jinja templates
         # to differentiate between them when rendering the configuration
         "provider": "gcp",
-        "hub_type": "basehub",
+        "dask_nodes": dask_nodes,
         "cluster_name": cluster_name,
         "cluster_region": cluster_region,
         "project_id": project_id,
diff --git a/deployer/commands/generate/hub_asset/cluster_entry.py b/deployer/commands/generate/hub_asset/cluster_entry.py
index 3f9d094bbe..28132dc955 100644
--- a/deployer/commands/generate/hub_asset/cluster_entry.py
+++ b/deployer/commands/generate/hub_asset/cluster_entry.py
@@ -18,7 +18,6 @@ def cluster_entry(
     """
 
     vars = {
-        "hub_type": "basehub",
         "cluster_name": cluster_name,
         "hub_name": hub_name,
     }
diff --git a/eksctl/Udder-Cranberry1-Various b/eksctl/Udder-Cranberry1-Various
deleted file mode 100644
index 1f5a570542..0000000000
--- a/eksctl/Udder-Cranberry1-Various
+++ /dev/null
@@ -1,10 +0,0 @@
-Udder-Cranberry1-Various
-
-
-
-ghg: Gloater-Stash3-Vice
-
-A6yK$EJ7v#^vaLPhX3&hJ3&n9tKM^2ga
-
-
-Semicolon6-Area
\ No newline at end of file
diff --git a/eksctl/ubc-eoas copy.jsonnet b/eksctl/ubc-eoas copy.jsonnet
deleted file mode 100644
index 975f4d2565..0000000000
--- a/eksctl/ubc-eoas copy.jsonnet
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-    This file is a jsonnet template of a eksctl's cluster configuration file,
-    that is used with the eksctl CLI to both update and initialize an AWS EKS
-    based cluster.
-
-    This file has in turn been generated from eksctl/template.jsonnet which is
-    relevant to compare with for changes over time.
-
-    To use jsonnet to generate an eksctl configuration file from this, do:
-
-        jsonnet ubc-eoas.jsonnet > ubc-eoas.eksctl.yaml
-
-    References:
-    - https://eksctl.io/usage/schema/
-*/
-local ng = import "./libsonnet/nodegroup.jsonnet";
-
-// place all cluster nodes here
-local clusterRegion = "ca-central-1";
-local masterAzs = ["ca-central-1a", "ca-central-1b", "ca-central-1d"];
-local nodeAz = "ca-central-1a";
-
-// Node definitions for notebook nodes. Config here is merged
-// with our notebook node definition.
-// A `node.kubernetes.io/instance-type label is added, so pods
-// can request a particular kind of node with a nodeSelector
-local notebookNodes = [
-    { instanceType: "r5.xlarge" },
-    { instanceType: "r5.2xlarge" },
-    { instanceType: "r5.4xlarge" },
-    { instanceType: "r5.16xlarge" },
-];
-
-local daskNodes = [];
-
-
-{
-    apiVersion: 'eksctl.io/v1alpha5',
-    kind: 'ClusterConfig',
-    metadata+: {
-        name: "ubc-eoas",
-        region: clusterRegion,
-        version: "1.30",
-    },
-    availabilityZones: masterAzs,
-    iam: {
-        withOIDC: true,
-    },
-    // If you add an addon to this config, run the create addon command.
-    //
-    //    eksctl create addon --config-file=ubc-eoas.eksctl.yaml
-    //
-    addons: [
-        {
-            // aws-ebs-csi-driver ensures that our PVCs are bound to PVs that
-            // couple to AWS EBS based storage, without it expect to see pods
-            // mounting a PVC failing to schedule and PVC resources that are
-            // unbound.
-            //
-            // Related docs: https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html
-            //
-            name: 'aws-ebs-csi-driver',
-            version: "latest",
-            wellKnownPolicies: {
-                ebsCSIController: true,
-            },
-        },
-    ],
-    nodeGroups: [
-        n + {clusterName: $.metadata.name} for n in
-        [
-            ng + {
-                namePrefix: 'core',
-                nameSuffix: 'a',
-                nameIncludeInstanceType: false,
-                availabilityZones: [nodeAz],
-                ssh: {
-                    publicKeyPath: 'ssh-keys/ubc-eoas.key.pub'
-                },
-                instanceType: "m5.xlarge",
-                minSize: 1,
-                maxSize: 6,
-                labels+: {
-                    "hub.jupyter.org/node-purpose": "core",
-                    "k8s.dask.org/node-purpose": "core"
-                },
-                tags+: {
-                    "2i2c:node-purpose": "core"
-                },
-            },
-        ] + [
-            ng + {
-                namePrefix: "nb",
-                availabilityZones: [nodeAz],
-                minSize: 0,
-                maxSize: 500,
-                instanceType: n.instanceType,
-                ssh: {
-                    publicKeyPath: 'ssh-keys/ubc-eoas.key.pub'
-                },
-                labels+: {
-                    "hub.jupyter.org/node-purpose": "user",
-                    "k8s.dask.org/node-purpose": "scheduler"
-                },
-                taints+: {
-                    "hub.jupyter.org_dedicated": "user:NoSchedule",
-                    "hub.jupyter.org/dedicated": "user:NoSchedule"
-                },
-            } + n for n in notebookNodes
-        ] + (
-            if daskNodes != null then
-                [
-                    ng + {
-                        namePrefix: "dask",
-                        availabilityZones: [nodeAz],
-                        minSize: 0,
-                        maxSize: 500,
-                        ssh: {
-                            publicKeyPath: 'ssh-keys/ubc-eoas.key.pub'
-                        },
-                        labels+: {
-                            "k8s.dask.org/node-purpose": "worker"
-                        },
-                        taints+: {
-                            "k8s.dask.org_dedicated" : "worker:NoSchedule",
-                            "k8s.dask.org/dedicated" : "worker:NoSchedule"
-                        },
-                        instancesDistribution+: {
-                            onDemandBaseCapacity: 0,
-                            onDemandPercentageAboveBaseCapacity: 0,
-                            spotAllocationStrategy: "capacity-optimized",
-                        },
-                    } + n for n in daskNodes
-                ] else []
-        )
-    ]
-}
diff --git a/terraform/gcp/projects/cluster.tfvars.template b/terraform/gcp/projects/cluster.tfvars.template
index 7ad5cd6581..d521896eaf 100644
--- a/terraform/gcp/projects/cluster.tfvars.template
+++ b/terraform/gcp/projects/cluster.tfvars.template
@@ -78,7 +78,7 @@ notebook_nodes = {
   }
 }
 
-{% if hub_type == "daskhub" %}
+{% if dask_nodes %}
 dask_nodes = {
   # A not yet fully established policy is being developed about using a single
   # node pool, see https://github.com/2i2c-org/infrastructure/issues/2687.
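
The template guard in `cluster.tfvars.template` is a plain truthiness test because `gcp.py` now stores `dask_nodes` in the Jinja context as a Python boolean; a boolean never compares equal to a string such as `"daskhub"`, so a string comparison there would silently skip the dask node pool block. A minimal check of this behaviour, assuming only `jinja2`:

```python
import jinja2

# The guard from cluster.tfvars.template, reduced to one line for the check.
template = jinja2.Template("{% if dask_nodes %}dask_nodes = { ... }{% endif %}")

# dask_nodes arrives as a Python boolean (see the vars dict in gcp.py above).
print(template.render(dask_nodes=True))   # -> "dask_nodes = { ... }"
print(template.render(dask_nodes=False))  # -> "" (block omitted)

# By contrast, a guard like {% if dask_nodes == "daskhub" %} is always false
# for a boolean, so the dask node pools would never be generated.
```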
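And a self-contained sketch (not part of the patch) of how the boolean travels from the `typer` prompt into the rendered template, assuming `typer` and `jinja2` as used by the deployer; the `TEMPLATE` string here is an illustrative stand-in for `cluster.tfvars.template`, not the real file:

```python
import jinja2
import typer

app = typer.Typer()

# Illustrative stand-in for terraform/gcp/projects/cluster.tfvars.template;
# only the dask_nodes guard is reproduced.
TEMPLATE = """\
{% if dask_nodes %}
dask_nodes = {
  # dask worker node pools would be defined here
}
{% endif %}
"""


@app.command()
def gcp(
    dask_nodes: bool = typer.Option(
        False,
        prompt='If this cluster needs dask nodes, please type "y", otherwise hit ENTER.',
    ),
):
    # A boolean typer.Option with a prompt is asked as '... [y/N]'; pressing
    # ENTER accepts the default (False) and skips the template block below.
    vars = {"dask_nodes": dask_nodes}
    print(jinja2.Template(TEMPLATE).render(**vars))


if __name__ == "__main__":
    app()
```

Answering `y` at the prompt renders the `dask_nodes` block; hitting ENTER leaves it out, which matches the behaviour the patch intends.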