diff --git a/.github/workflows/deploy-hubs.yaml b/.github/workflows/deploy-hubs.yaml index 9b877dfca1..493a581596 100644 --- a/.github/workflows/deploy-hubs.yaml +++ b/.github/workflows/deploy-hubs.yaml @@ -44,7 +44,7 @@ on: # https://github.blog/changelog/2021-04-19-github-actions-limit-workflow-run-or-job-concurrency/ concurrency: deploy -# This environment variable triggers the deployer to colourise print statments in the +# This environment variable triggers the deployer to colourise print statements in the # GitHug Actions logs for easy reading env: TERM: xterm @@ -85,7 +85,7 @@ jobs: uses: actions/cache@v3 with: path: ~/.cache/pip - # key determines if we define or re-use an existing cache or not. Our + # key determines if we define or reuse an existing cache or not. Our # key ensure we cache within a workflow run and its attempts, but not # between workflow runs. key: "${{ github.run_id }}" diff --git a/.github/workflows/ensure-uptime-checks.yaml b/.github/workflows/ensure-uptime-checks.yaml index 8c2ad4c224..d5ca8a2d64 100644 --- a/.github/workflows/ensure-uptime-checks.yaml +++ b/.github/workflows/ensure-uptime-checks.yaml @@ -19,7 +19,7 @@ on: # https://github.blog/changelog/2021-04-19-github-actions-limit-workflow-run-or-job-concurrency/ concurrency: uptime-checks -# This environment variable triggers the deployer to colourise print statments in the +# This environment variable triggers the deployer to colourise print statements in the # GitHub Actions logs for easy reading env: TERM: xterm diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 99a5dafb94..f70d6bcfb3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -55,6 +55,14 @@ repos: # Add files here if they contain the word 'secret' but should not be encrypted exclude: 
secrets\.md|helm-charts/support/templates/prometheus-ingres-auth/secret\.yaml|helm-charts/basehub/templates/dex/secret\.yaml|helm-charts/basehub/templates/static/secret\.yaml|config/clusters/templates/common/support\.secret\.values\.yaml|helm-charts/basehub/templates/ingress-auth/secret\.yaml + # Prevent known typos from being committed + - repo: https://github.com/codespell-project/codespell + rev: v2.2.5 + hooks: + - id: codespell + additional_dependencies: + - tomli + # pre-commit.ci config reference: https://pre-commit.ci/#configuration ci: autoupdate_schedule: monthly diff --git a/README.md b/README.md index 70136f4163..538fdc460b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Infrastructure for deployments -This repository contains deployment infrastucture and documentation for a federation of JupyterHubs that 2i2c manages for various communities. +This repository contains deployment infrastructure and documentation for a federation of JupyterHubs that 2i2c manages for various communities. See [the infrastructure documentation](https://infrastructure.2i2c.org) for more information. diff --git a/config/clusters/2i2c-aws-us/go-bgc.values.yaml b/config/clusters/2i2c-aws-us/go-bgc.values.yaml index d66e38ef4c..b6eda53924 100644 --- a/config/clusters/2i2c-aws-us/go-bgc.values.yaml +++ b/config/clusters/2i2c-aws-us/go-bgc.values.yaml @@ -59,7 +59,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee # https://cloud.google.com/kubernetes-engine/docs/concepts/plan-node-sizes. 
diff --git a/config/clusters/2i2c-aws-us/itcoocean.values.yaml b/config/clusters/2i2c-aws-us/itcoocean.values.yaml index ee89a25b0d..99b8abf493 100644 --- a/config/clusters/2i2c-aws-us/itcoocean.values.yaml +++ b/config/clusters/2i2c-aws-us/itcoocean.values.yaml @@ -86,7 +86,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee # https://cloud.google.com/kubernetes-engine/docs/concepts/plan-node-sizes. diff --git a/config/clusters/2i2c-aws-us/ncar-cisl.values.yaml b/config/clusters/2i2c-aws-us/ncar-cisl.values.yaml index 19d84efe5a..7e9ee9d2f2 100644 --- a/config/clusters/2i2c-aws-us/ncar-cisl.values.yaml +++ b/config/clusters/2i2c-aws-us/ncar-cisl.values.yaml @@ -41,8 +41,8 @@ basehub: - read:org Authenticator: admin_users: - - kcote-ncar # Ken Cote, Initial adminstrator - - NicholasCote # Nicholas Cote, Initial adminstrator + - kcote-ncar # Ken Cote, Initial administrator + - NicholasCote # Nicholas Cote, Initial administrator - nwehrheim # Nick Wehrheim, Community representative singleuser: image: @@ -60,7 +60,7 @@ basehub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. 
# - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/catalystproject-africa/must.values.yaml b/config/clusters/catalystproject-africa/must.values.yaml index 88501779b6..b8b838f089 100644 --- a/config/clusters/catalystproject-africa/must.values.yaml +++ b/config/clusters/catalystproject-africa/must.values.yaml @@ -57,7 +57,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/catalystproject-africa/nm-aist.values.yaml b/config/clusters/catalystproject-africa/nm-aist.values.yaml index 0c8d95a4ae..23d8c138ea 100644 --- a/config/clusters/catalystproject-africa/nm-aist.values.yaml +++ b/config/clusters/catalystproject-africa/nm-aist.values.yaml @@ -56,7 +56,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/catalystproject-africa/staging.values.yaml b/config/clusters/catalystproject-africa/staging.values.yaml index bb78aa00ac..c32b32f352 100644 --- a/config/clusters/catalystproject-africa/staging.values.yaml +++ b/config/clusters/catalystproject-africa/staging.values.yaml @@ -53,7 +53,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. 
# # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/catalystproject-latam/common.values.yaml b/config/clusters/catalystproject-latam/common.values.yaml index 087caaa658..5174a4ffe2 100644 --- a/config/clusters/catalystproject-latam/common.values.yaml +++ b/config/clusters/catalystproject-latam/common.values.yaml @@ -27,7 +27,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee # https://cloud.google.com/kubernetes-engine/docs/concepts/plan-node-sizes. diff --git a/config/clusters/leap/common.values.yaml b/config/clusters/leap/common.values.yaml index 8235910299..32a6ecfa9c 100644 --- a/config/clusters/leap/common.values.yaml +++ b/config/clusters/leap/common.values.yaml @@ -90,7 +90,7 @@ basehub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. 
# - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/linked-earth/common.values.yaml b/config/clusters/linked-earth/common.values.yaml index 4ec4b7e663..43a1012559 100644 --- a/config/clusters/linked-earth/common.values.yaml +++ b/config/clusters/linked-earth/common.values.yaml @@ -55,7 +55,7 @@ basehub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/nasa-cryo/common.values.yaml b/config/clusters/nasa-cryo/common.values.yaml index e98b4c6353..02b4b36df1 100644 --- a/config/clusters/nasa-cryo/common.values.yaml +++ b/config/clusters/nasa-cryo/common.values.yaml @@ -107,7 +107,7 @@ basehub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/config/clusters/qcl/common.values.yaml b/config/clusters/qcl/common.values.yaml index 9576780aca..36f24213c8 100644 --- a/config/clusters/qcl/common.values.yaml +++ b/config/clusters/qcl/common.values.yaml @@ -61,7 +61,7 @@ jupyterhub: # https://github.com/2i2c-org/infrastructure/issues/2121. 
# # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee # https://cloud.google.com/kubernetes-engine/docs/concepts/plan-node-sizes. diff --git a/config/clusters/smithsonian/common.values.yaml b/config/clusters/smithsonian/common.values.yaml index ae6a90f7f8..b313591b39 100644 --- a/config/clusters/smithsonian/common.values.yaml +++ b/config/clusters/smithsonian/common.values.yaml @@ -67,7 +67,7 @@ basehub: # https://github.com/2i2c-org/infrastructure/issues/2121. # # - Memory requests are different from the description, based on: - # whats found to remain allocate in k8s, subtracting 1GiB + # what's found to remain allocate in k8s, subtracting 1GiB # overhead for misc system pods, and transitioning from GB in # description to GiB in mem_guarantee. # - CPU requests are lower than the description, with a factor of diff --git a/deployer/README.md b/deployer/README.md index 134de872b6..47ffcc8f72 100644 --- a/deployer/README.md +++ b/deployer/README.md @@ -383,7 +383,7 @@ Once you run this command, run `export DOCKER_HOST=tcp://localhost:23760` in ano docker daemon. #### `exec shell` -This exec sub-command can be used to aquire a shell in various places of the infrastructure. +This exec sub-command can be used to acquire a shell in various places of the infrastructure. ##### `exec shell hub` diff --git a/deployer/commands/exec/infra_components.py b/deployer/commands/exec/infra_components.py index 5b98620c89..7ea1fba30f 100644 --- a/deployer/commands/exec/infra_components.py +++ b/deployer/commands/exec/infra_components.py @@ -210,8 +210,8 @@ def ask_for_dirname_again(): """ Function that asks the user to provide the name of the source and dest directories using typer prompts. 
- Returns the name of the source and dest directories as a touple if they were provided by the user - or the None, None touple. + Returns the name of the source and dest directories as a tuple if they were provided by the user + or the None, None tuple. """ print_colour("Asking for the dirs again...", "yellow") continue_with_dir_names_confirmation = typer.confirm( diff --git a/deployer/commands/generate/billing/importers.py b/deployer/commands/generate/billing/importers.py index cf25b66321..1094181a32 100644 --- a/deployer/commands/generate/billing/importers.py +++ b/deployer/commands/generate/billing/importers.py @@ -251,7 +251,7 @@ def get_shared_cluster_hub_costs(cluster, start_month, end_month): # Rename project to use hub names totals["project"] = totals["hub"] totals.drop("hub", axis=1) - # Calcluate cost from utilization + # Calculate cost from utilization # Needs to account for uptime checks and 2i2c paid for stuff totals["cost"] = totals["utilization"].multiply( totals["total_with_credits"].astype(float), axis=0 diff --git a/deployer/commands/generate/dedicated_cluster/common.py b/deployer/commands/generate/dedicated_cluster/common.py index bfe32c97da..3a87585d02 100644 --- a/deployer/commands/generate/dedicated_cluster/common.py +++ b/deployer/commands/generate/dedicated_cluster/common.py @@ -92,7 +92,7 @@ def generate_support_files(cluster_config_directory, vars): - `config//support.values.yaml` - `config//enc-support.secret.values.yaml` """ - # Generate the suppport values file `support.values.yaml` + # Generate the support values file `support.values.yaml` print_colour("Generating the support values file...", "yellow") with open( REPO_ROOT_PATH / "config/clusters/templates/common/support.values.yaml" diff --git a/deployer/commands/grafana/tokens.py b/deployer/commands/grafana/tokens.py index 4ed30dbc99..417305752d 100644 --- a/deployer/commands/grafana/tokens.py +++ b/deployer/commands/grafana/tokens.py @@ -122,7 +122,7 @@ def 
get_deployer_token(sa_endpoint, sa_id, headers): ) if not response.ok: print( - f"An error occured when retrieving the tokens the service account with id {sa_id}.\n" + f"An error occurred when retrieving the tokens the service account with id {sa_id}.\n" f"Error was {response.text}." ) response.raise_for_status() @@ -144,7 +144,7 @@ def create_deployer_token(sa_endpoint, sa_id, headers): if not response.ok: print( - "An error occured when creating the token for the deployer service account.\n" + "An error occurred when creating the token for the deployer service account.\n" f"Error was {response.text}." ) response.raise_for_status() diff --git a/deployer/commands/validate/cluster.schema.yaml b/deployer/commands/validate/cluster.schema.yaml index 6f43cb3ed1..602776363e 100644 --- a/deployer/commands/validate/cluster.schema.yaml +++ b/deployer/commands/validate/cluster.schema.yaml @@ -235,7 +235,7 @@ properties: type: string description: | Status code expected from hitting the health checkpoint for - this hub. Defaults to 200, can be overriden in case we have + this hub. Defaults to 200, can be overridden in case we have basic auth setup for the entire hub domain: type: string diff --git a/docs/contributing/code-review.md b/docs/contributing/code-review.md index 924af810b8..68ba2d1d01 100644 --- a/docs/contributing/code-review.md +++ b/docs/contributing/code-review.md @@ -44,7 +44,7 @@ or can wait for review. That said, sometimes the only way to understand the impact of a change is to merge and see how things go, so use your best judgment! -Here is a list of things you can clearly, unambigously self merge without +Here is a list of things you can clearly, unambiguously self merge without any approval. 1. Updating admin users for a hub @@ -119,7 +119,7 @@ To deploy changes to the authentication workflow, follow these steps: - cluster: `utoronto`, hub: `staging` (Azure AD) - cluster: `2i2c`, hub: `staging` (CILogon) 1. **Login into the staging hubs**. 
Try logging in into the hubs where you deployed your changes. -1. **Start a server**. Afer you've logged into the hub, make sure everything works as expected by spinning up a server. +1. **Start a server**. After you've logged into the hub, make sure everything works as expected by spinning up a server. 1. **Post the status of the manual steps above**. In your PR's top comment, post the hubs where you've deployed the changes and whether or not they are functioning properly. 1. **Wait for review and approval**. Leave the PR open for other team members to review and approve. diff --git a/docs/helper-programs/generate-general-info-table-about-hubs.py b/docs/helper-programs/generate-general-info-table-about-hubs.py index dd61c0180f..2b94038cde 100644 --- a/docs/helper-programs/generate-general-info-table-about-hubs.py +++ b/docs/helper-programs/generate-general-info-table-about-hubs.py @@ -2,7 +2,7 @@ This is used in two places: -- docs/_static/hub-table.json is published with the docs and meant for re-use in other parts of 2i2c +- docs/_static/hub-table.json is published with the docs and meant for reuse in other parts of 2i2c - docs/tmp/hub-table.csv is read by reference/hubs.md to create a list of hubs """ import pandas as pd @@ -81,7 +81,7 @@ def build_hub_list_entry( def build_hub_statistics_df(df): # Write some quick statistics for display # Calculate total number of community hubs by removing staging and demo hubs - # Remove `staging` hubs to count the total number of communites we serve + # Remove `staging` hubs to count the total number of communities we serve filter_out = ["staging", "demo"] community_hubs = df.loc[ df["name"].map(lambda a: all(ii not in a.lower() for ii in filter_out)) @@ -167,7 +167,7 @@ def main(): write_to_json_and_csv_files(df, "hub-table") write_to_json_and_csv_files(community_hubs_by_cluster, "hub-stats") - print("Finished updating list of hubs and statics tables...") + print("Finished updating list of hubs and statistics tables...") 
if __name__ == "__main__": diff --git a/docs/helper-programs/generate-hub-features-table.py b/docs/helper-programs/generate-hub-features-table.py index 34646060f0..8fd6e18a05 100644 --- a/docs/helper-programs/generate-hub-features-table.py +++ b/docs/helper-programs/generate-hub-features-table.py @@ -2,7 +2,7 @@ This is used in two places: -- docs/_static/hub-options-table.json is published with the docs and meant for re-use in other parts of 2i2c +- docs/_static/hub-options-table.json is published with the docs and meant for reuse in other parts of 2i2c - docs/tmp/hub-options-table.csv is read by reference/options.md to create a list of hubs """ import hcl2 @@ -171,7 +171,7 @@ def build_options_list_entry(hub, hub_count, values_files_features, terraform_fe "user buckets (scratch/persistent)": terraform_features.get( hub["name"], {} ).get("user_buckets", False), - "requestor pays for buckets storage": terraform_features.get( + "requester pays for buckets storage": terraform_features.get( hub["name"], {} ).get("requestor_pays", False), "authenticator": values_files_features["authenticator"], diff --git a/docs/howto/features/anonymized-usernames.md b/docs/howto/features/anonymized-usernames.md index c23ecb6317..42dc81ffbd 100644 --- a/docs/howto/features/anonymized-usernames.md +++ b/docs/howto/features/anonymized-usernames.md @@ -40,7 +40,7 @@ useful privacy guarantees to be worth it. Those are: 2. We live in a world where user data leaks are a fact of life, and you can buy tons of user identifiers for pretty cheap. This may also happen to *us*, and we may unintentionally leak data too! So users should still be hard to - de-anonymize when the attacker has in their posession the following: + de-anonymize when the attacker has in their possession the following: 1. List of user identifiers (emails, usernames, numeric user ids, etc) from *other data breaches*. 
diff --git a/docs/howto/features/cloud-access.md b/docs/howto/features/cloud-access.md index f6d8b8c7ff..5dd9ef9d74 100644 --- a/docs/howto/features/cloud-access.md +++ b/docs/howto/features/cloud-access.md @@ -136,6 +136,6 @@ This AWS IAM Role is managed via terraform. If the hub is a `daskhub`, nest the config under a `basehub` key ``` -7. Get this change deployed, and users should now be able to use the requestor pays feature! +7. Get this change deployed, and users should now be able to use the requester pays feature! Currently running users might have to restart their pods for the change to take effect. diff --git a/docs/howto/manage-domains/redirects.md b/docs/howto/manage-domains/redirects.md index 9e22d87f64..358b0614b5 100644 --- a/docs/howto/manage-domains/redirects.md +++ b/docs/howto/manage-domains/redirects.md @@ -16,5 +16,5 @@ redirects: ``` You can add any number of such redirects. They will all be `302 Temporary` -redirects, in case we want to re-use the old domain for something else in +redirects, in case we want to reuse the old domain for something else in the future. diff --git a/docs/howto/troubleshoot/cilogon-user-accounts.md b/docs/howto/troubleshoot/cilogon-user-accounts.md index 87390c1888..85ac131db1 100644 --- a/docs/howto/troubleshoot/cilogon-user-accounts.md +++ b/docs/howto/troubleshoot/cilogon-user-accounts.md @@ -1,15 +1,15 @@ # CILogon: switch Identity Providers or user accounts By default, logging in with a particular user account will persist your credentials in future sessions. -This means that you'll automatically re-use the same institutional and user account when you access the hub's home page. +This means that you'll automatically reuse the same institutional and user account when you access the hub's home page. ## Switch Identity Providers 1. **Logout of the Hub** using the logout button or by going to `https://{hub-name}/hub/logout`. -2. **Clear browser cookies** (optional). 
If the user asked CILogon to re-use the same Identity Provider connection when they logged in, they'll need to [clear browser cookies](https://www.lifewire.com/how-to-delete-cookies-2617981) for . +2. **Clear browser cookies** (optional). If the user asked CILogon to reuse the same Identity Provider connection when they logged in, they'll need to [clear browser cookies](https://www.lifewire.com/how-to-delete-cookies-2617981) for . ```{figure} ../../images/cilogon-remember-this-selection.png - The dialog box that allows you to re-use the same Identity Provider. + The dialog box that allows you to reuse the same Identity Provider. ``` Firefox example: @@ -40,6 +40,6 @@ If you see a 403 error page, this means that the account you were using to login ```{figure} ../../images/403-forbidden.png ``` -If you think this is an error, and the account should have been allowed, then contact the hub adminstrator/s. +If you think this is an error, and the account should have been allowed, then contact the hub administrator/s. If you used the wrong user account, you can log in using another account by following the steps in [](troubleshoot:cilogon:switch-user-accounts). diff --git a/docs/howto/troubleshoot/logs/cloud-logs.md b/docs/howto/troubleshoot/logs/cloud-logs.md index 6e2db98741..121d371326 100644 --- a/docs/howto/troubleshoot/logs/cloud-logs.md +++ b/docs/howto/troubleshoot/logs/cloud-logs.md @@ -25,7 +25,7 @@ logs are kept for 30 days, and are searchable. as time sliders. However, for most of our logs, the 'log levels' (error, warning, etc) are not parsed correctly, and hence are useless. -4. Google provies a [query library](https://cloud.google.com/logging/docs/view/query-library) set of [sample queries](https://cloudlogging.app.goo.gl/Ad7B8hjFHpj6X7rT8) that you can access via the Library tab in Logs Explorer. +4. 
Google provides a [query library](https://cloud.google.com/logging/docs/view/query-library) set of [sample queries](https://cloudlogging.app.goo.gl/Ad7B8hjFHpj6X7rT8) that you can access via the Library tab in Logs Explorer. ### Common queries diff --git a/docs/howto/troubleshoot/logs/kubectl-logs.md b/docs/howto/troubleshoot/logs/kubectl-logs.md index b379443fc2..4b8f741914 100644 --- a/docs/howto/troubleshoot/logs/kubectl-logs.md +++ b/docs/howto/troubleshoot/logs/kubectl-logs.md @@ -139,7 +139,7 @@ The following commands require passing the namespace where a specific pod is run ``` ### Kubernetes pod logs -You can access any pod's logs by using the `kubectl logs` commands. Bellow are some of the most common debugging commands. +You can access any pod's logs by using the `kubectl logs` commands. Below are some of the most common debugging commands. ```{tip} 1. The `--follow` flag diff --git a/docs/hub-deployment-guide/cloud-accounts/new-gcp-project.md b/docs/hub-deployment-guide/cloud-accounts/new-gcp-project.md index 1efccf79dd..43929595a4 100644 --- a/docs/hub-deployment-guide/cloud-accounts/new-gcp-project.md +++ b/docs/hub-deployment-guide/cloud-accounts/new-gcp-project.md @@ -49,7 +49,7 @@ Finally, we should check what quotas are enforced on the project and increase th ```{warning} This must be only done if it is a **new** billing account handled by 2i2c for a specific project, -rather than just for a new project under the same billing account. This is a somewhat rare occurance! +rather than just for a new project under the same billing account. This is a somewhat rare occurrence! 
If there is already billing export set up for this **billing account** as you try to complete these steps, do not change it and raise an issue for engineering to diff --git a/docs/hub-deployment-guide/deploy-support/configure-support.md b/docs/hub-deployment-guide/deploy-support/configure-support.md index 12c8313d60..e621cf522b 100644 --- a/docs/hub-deployment-guide/deploy-support/configure-support.md +++ b/docs/hub-deployment-guide/deploy-support/configure-support.md @@ -2,7 +2,7 @@ # Configure and deploy the `support` chart The `support` chart is a helm chart maintained by the 2i2c Engineers that consists of common tools used to support JupyterHub deployments in the cloud. -These tools are [`ingress-nginx`](https://kubernetes.github.io/ingress-nginx/), for controlling ingresses and load balancing; [`cert-manager`](https://cert-manager.io/docs/), for automatically provisioning TLS certificates from [Let's Encrypt](https://letsencrypt.org/); [Prometheus](https://prometheus.io/), for scraping and storing metrics from the cluster and hub; and [Grafana](https://grafana.com/), for visualising the metrics retreived by Prometheus. +These tools are [`ingress-nginx`](https://kubernetes.github.io/ingress-nginx/), for controlling ingresses and load balancing; [`cert-manager`](https://cert-manager.io/docs/), for automatically provisioning TLS certificates from [Let's Encrypt](https://letsencrypt.org/); [Prometheus](https://prometheus.io/), for scraping and storing metrics from the cluster and hub; and [Grafana](https://grafana.com/), for visualising the metrics retrieved by Prometheus. This section will walk you through how to deploy the support chart on a cluster. @@ -119,7 +119,7 @@ Namecheap.com](https://ap.www.namecheap.com/Domains/DomainControlPanel/2i2c.clou 2. `*.`, for all other hubs, grafana and prometheus instances. 
-Use an `A` record when we point to an external IP addresse (GCP, Azure), and a +Use an `A` record when we point to an external IP address (GCP, Azure), and a `CNAME` record when we point to another domain (AWS). ```{note} diff --git a/docs/hub-deployment-guide/hubs/other-hub-ops/delete-hub.md b/docs/hub-deployment-guide/hubs/other-hub-ops/delete-hub.md index a1c91b52da..81643391a4 100644 --- a/docs/hub-deployment-guide/hubs/other-hub-ops/delete-hub.md +++ b/docs/hub-deployment-guide/hubs/other-hub-ops/delete-hub.md @@ -5,7 +5,7 @@ If you'd like to delete a hub, there are a few steps that we need to take: ## 1. Manage existing data -The existing data should either be migrated to another place or should be deleted, depending on what has been aggreed to with the Community Representative. +The existing data should either be migrated to another place or should be deleted, depending on what has been agreed to with the Community Representative. If the data should be migrated from the hub before decommissioning, then make sure that a 2i2c Engineer has access to the destination in order to complete the data migration. @@ -76,12 +76,12 @@ This will clean up some of the hub values related to auth and must be done prior If the hub remains listed in its cluster's `cluster.yaml` file, the hub could be redeployed by any merged PR triggering our CI/CD pipeline. -Open a decomissioning PR that removes the appropriate hub entry from the +Open a decommissioning PR that removes the appropriate hub entry from the `config/clusters/$CLUSTER_NAME/cluster.yaml` file and associated `*.values.yaml` files no longer referenced in the `cluster.yaml` file. You can continue with the steps below before the PR is merged, but be ready to -re-do them if the CI/CD pipeline was triggered before the decomissioning PR was +re-do them if the CI/CD pipeline was triggered before the decommissioning PR was merged. ## 4. 
Delete the Helm release and namespace diff --git a/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/across-clusters.md b/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/across-clusters.md index d538d935f9..44ebcc4e2c 100644 --- a/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/across-clusters.md +++ b/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/across-clusters.md @@ -15,7 +15,7 @@ Next, copy home directory contents from the old cluster to the new cluster. ```{note} This might not entirely be necessary - if the source and target cluster -are in the same GCP Project / AWS Account, we can just re-use the same +are in the same GCP Project / AWS Account, we can just reuse the same home directory storage! ``` @@ -33,7 +33,7 @@ Primarily used with GKE right now. NFS server will be able to open SSH connections to the source NFS server. 4. Copy the NFS home directories from the source NFS server to the target NFS server, making sure that the NFS exports locations - match up appopriately. For example, if the source NFS server has + match up appropriately. For example, if the source NFS server has home directories for each hub stored in `/export/home-01/homes`, and the target NFS server also has hub home directories stored under `/export/home-01/homes`, you can `scp` the contents across with: @@ -99,7 +99,7 @@ source & dest EFS instances are created, create a DataSync instance in the the VPC, Subnet and Security Group that have access to the EFS instance (you can find these details in the 'Network' tab of the EFS page in the AWS Console). Set the transfer to hourly, but immediately manually start the sync task. Once the -data is transfered over and verified, switch the EFS used in the hub config. +data is transferred over and verified, switch the EFS used in the hub config. Remember to delete the datasync instance soon after - or it might incur extra charges! 
@@ -191,7 +191,7 @@ Tip: You can use [this script](https://github.com/2i2c-org/infrastructure/tree/H Make sure the new cluster has Grafana Dashboards deployed. If not, follow the steps in [](setup-grafana). Also, verify if the old cluster had Prometheus deployed and whether you also need to migrate that. ## 4. Take down the current hub -Delete the proxy service to make the hub unreacheable. +Delete the proxy service to make the hub unreachable. ``` bash kubectl delete svc proxy-public -n diff --git a/docs/hub-deployment-guide/new-cluster/aws.md b/docs/hub-deployment-guide/new-cluster/aws.md index 6e21d149f8..f517c18d59 100644 --- a/docs/hub-deployment-guide/new-cluster/aws.md +++ b/docs/hub-deployment-guide/new-cluster/aws.md @@ -35,7 +35,7 @@ terraform to provision supporting infrastructure, such as storage buckets. (new-cluster:aws-setup-credentials)= ### Setup credentials -Depending on wether this project is using AWS SSO or not, you can use the following +Depending on whether this project is using AWS SSO or not, you can use the following links to figure out how to authenticate to this project from your terminal. - [For accounts setup with AWS SSO](cloud-access:aws-sso:terminal) diff --git a/docs/reference/ci-cd/auto-bumping.md b/docs/reference/ci-cd/auto-bumping.md index b2186a5018..6995214692 100644 --- a/docs/reference/ci-cd/auto-bumping.md +++ b/docs/reference/ci-cd/auto-bumping.md @@ -55,7 +55,7 @@ Two inputs are required for this Action: 2. A variable called `chart_urls` which is a dictionary containing information about the sub-charts we wish to bump in the given config file. By providing a dictionary in this way, we can choose to include/exclude sub-charts in the given config from being bumped. -The `chart_urls` has the sub-charts we wish to bump as keys, and URLs where a list of pulished versions of those charts is available. 
+The `chart_urls` has the sub-charts we wish to bump as keys, and URLs where a list of published versions of those charts is available. An example below would bump the JupyterHub subchart of the basehub helm chart. ```json diff --git a/docs/reference/options.md b/docs/reference/options.md index 2d838a7203..c053abac5a 100644 --- a/docs/reference/options.md +++ b/docs/reference/options.md @@ -48,7 +48,7 @@ $(document).ready( function () { {"render": checkbox}, // dedicated cluster column {"render": checkbox}, // dedicated nodepool column {"render": checkbox}, // user buckets (scratch/persistent) column - {"render": checkbox}, // requestor pays for buckets storage column + {"render": checkbox}, // requester pays for buckets storage column null, // authenticator column {"render": checkbox}, // user anonymisation column {"render": checkbox}, // allusers access column @@ -231,7 +231,7 @@ flowchart TB public_bucket[Publicly accessible] from_hub[Buckets accessible from the Hub] outside_hub[Buckets accessible from outside the Hub] - requestor_pays[Requestor Pays] + requestor_pays[Requester Pays] hub_cloud_permissions --> outside_hub hub_cloud_permissions -- default --> from_hub diff --git a/docs/reference/tools.md b/docs/reference/tools.md index eb9b2f5c35..34881f7073 100644 --- a/docs/reference/tools.md +++ b/docs/reference/tools.md @@ -84,7 +84,7 @@ to encrypt our secrets, so you need the Google Cloud tools installed and authenticated locally (following [the instructions here](https://github.com/mozilla/sops/#23encrypting-using-gcp-kms)) before you can use sops. -`sops` is called programatically by our deployment scripts to decrypt +`sops` is called programmatically by our deployment scripts to decrypt files for deployment, and you will use it interactively to modify or encrypt new files. 
diff --git a/docs/sre-guide/common-problems-solutions.md b/docs/sre-guide/common-problems-solutions.md index f3b8df36cc..fc64871c89 100644 --- a/docs/sre-guide/common-problems-solutions.md +++ b/docs/sre-guide/common-problems-solutions.md @@ -196,7 +196,7 @@ name of with our second environment variable. export GITHUB_ENV=test.txt # You can call this file anything you like, it's the setting of GITHUB_ENV that's important ``` -This mimicks the GitHub Actions environment where a `GITHUB_ENV` file is available +This mimics the GitHub Actions environment where a `GITHUB_ENV` file is available to store and share environment variables across steps/jobs, and this will be where our JSON formatted job matrices will be written to. diff --git a/docs/sre-guide/manage-k8s/node-administration.md b/docs/sre-guide/manage-k8s/node-administration.md index 446e52fa73..b6f22b92c0 100644 --- a/docs/sre-guide/manage-k8s/node-administration.md +++ b/docs/sre-guide/manage-k8s/node-administration.md @@ -11,7 +11,7 @@ This separation should protect against user pods exhausting the resources needed The machines where the core nodes run, are different than the ones on which the user nodes run. The type of these machines is chosen based on the number, type, and the resource needs (CPU, memory, etc.) of the pods that will be scheduled to run on these nodes. -Because of this resource dependance, these types might be adjusted in the future. +Because of this resource dependence, these types might be adjusted in the future. You can checkout the exact type of the core and user nodes VMs in the `terraform` config for each cloud provider. For example, here is the [`terraform` config for Google Cloud](https://github.com/2i2c-org/infrastructure/tree/HEAD/terraform/gcp/variables.tf). 
diff --git a/docs/sre-guide/node-scale-up/azure.md b/docs/sre-guide/node-scale-up/azure.md index 01fc6b7fda..785145852e 100644 --- a/docs/sre-guide/node-scale-up/azure.md +++ b/docs/sre-guide/node-scale-up/azure.md @@ -29,7 +29,7 @@ server startup faster. then in order to scale up the node pool to an exact number of nodes, temporarily deactivate the autoscaler, by selecting the `Manual` option, introduce the desired number of nodes then click `Apply`. -1. After the Apply succeded, you should see the new nodes coming up. +1. After the Apply succeeded, you should see the new nodes coming up. You can then click on `Scale node pool` option again, **enable the `Autoscale`**, and set the `Min` number of nodes to the desired one the you set in the step before. diff --git a/docs/sre-guide/node-scale-up/index.md b/docs/sre-guide/node-scale-up/index.md index ff2552fe28..42f856f797 100644 --- a/docs/sre-guide/node-scale-up/index.md +++ b/docs/sre-guide/node-scale-up/index.md @@ -1,6 +1,6 @@ # Scaling nodepools -When we provision Kubernetes clusters, we setup two, somtimes three, nodepools: +When we provision Kubernetes clusters, we set up two, sometimes three, nodepools: - `core` that contains 'always-on' services such as the hub itself; - `notebooks` where users' notebook servers are created; diff --git a/docs/sre-guide/support/grafana-account.md b/docs/sre-guide/support/grafana-account.md index ef2d64f3bd..69a3006c2d 100644 --- a/docs/sre-guide/support/grafana-account.md +++ b/docs/sre-guide/support/grafana-account.md @@ -29,7 +29,7 @@ However, for now, we can **invite** individual users to a grafana via the Grafan and then select "Users". 
```{figure} ../../images/grafana-grant-access_step-3a.jpg - Location of the "hamburger" menu on the Grafana dashbaord + Location of the "hamburger" menu on the Grafana dashboard ``` ```{figure} ../../images/grafana-grant-access_step-3b.jpg @@ -72,5 +72,5 @@ However, for now, we can **invite** individual users to a grafana via the Grafan ``` ```{warning} - Anyone posessing this invite link can access the grafana, so make sure to not leak it! + Anyone possessing this invite link can access the grafana, so make sure to not leak it! ``` diff --git a/docs/topic/access-creds/cloud-auth.md b/docs/topic/access-creds/cloud-auth.md index 3953ad4b28..f05d3326d9 100644 --- a/docs/topic/access-creds/cloud-auth.md +++ b/docs/topic/access-creds/cloud-auth.md @@ -44,7 +44,7 @@ AWS Organizations (cloud-access:aws-management-account)= AWS Management Account -: A special account that is a centralized place for configuration for an AWS Organization and other accounts that might be in it. Our AWS Management account is `2i2c-sandbox`. It defines our **payment methods** for centralized payment across all of our accounts. So each of our AWS Accounts generates a bill, and these are consolidated into `2i2c-sandbox` and payed with a single credit card. +: A special account that is a centralized place for configuration for an AWS Organization and other accounts that might be in it. Our AWS Management account is `2i2c-sandbox`. It defines our **payment methods** for centralized payment across all of our accounts. So each of our AWS Accounts generates a bill, and these are consolidated into `2i2c-sandbox` and paid with a single credit card. (cloud-access:aws-sso)= diff --git a/docs/topic/features.md b/docs/topic/features.md index 2ca11f4014..517f7fee16 100644 --- a/docs/topic/features.md +++ b/docs/topic/features.md @@ -28,7 +28,7 @@ improving the security posture of our hubs. By default, the organization *hosting* data on Google Cloud pays for both storage and bandwidth costs of the data. 
However, Google Cloud also offers -a [requestor pays](https://cloud.google.com/storage/docs/requester-pays) +a [requester pays](https://cloud.google.com/storage/docs/requester-pays) option, where the bandwidth costs are paid for by the organization *requesting* the data. This is very commonly used by organizations that provide big datasets on Google Cloud storage, to sustainably share costs of maintaining the data. @@ -60,6 +60,6 @@ of a project. We set the environment variable `PERSISTENT_BUCKET` to the form `:///` so users can put stuff in this. ```{warning} -Objects put in `PERSISTENT_BUCKET` *must* be deleted by the users when no logner in use +Objects put in `PERSISTENT_BUCKET` *must* be deleted by the users when no longer in use to prevent cost overruns! This *can not* be managed by 2i2c. ``` diff --git a/docs/topic/infrastructure/config.md b/docs/topic/infrastructure/config.md index 87a563c4ac..d0fe8c7991 100644 --- a/docs/topic/infrastructure/config.md +++ b/docs/topic/infrastructure/config.md @@ -26,7 +26,7 @@ pieces of config people want to know values for, and where you can find them. The default memory limit and guarantee for all users across all our hubs is set in [`helm-charts/basehub/values.yaml`](https://github.com/2i2c-org/infrastructure/tree/HEAD/helm-charts/basehub/values.yaml#L104), -under `jupyterhub.singleuser.memory`. This is sometimes overriden on a per-hub +under `jupyterhub.singleuser.memory`. This is sometimes overridden on a per-hub basis in the config for the hub under [`config/clusters`](https://github.com/2i2c-org/infrastructure/tree/HEAD/config/clusters) ### 2i2c staff lists diff --git a/docs/topic/infrastructure/hub-helm-charts.md b/docs/topic/infrastructure/hub-helm-charts.md index 9dd83ca40c..1d0965b575 100644 --- a/docs/topic/infrastructure/hub-helm-charts.md +++ b/docs/topic/infrastructure/hub-helm-charts.md @@ -41,7 +41,7 @@ subcharts of the daskhub. 
``` % The editable version of the diagram is here: https://docs.google.com/presentation/d/1KMyrTd3wdR715tPGuzIHkHqScXBlLpeiksIM2x7EI0g/edit?usp=sharing -This hierachy is the reason why when adding a new hub using the `daskhub` +This hierarchy is the reason why when adding a new hub using the `daskhub` specific configuration in a `*.values.yaml` file needs to be nested under a `basehub` key, indicating that we are overriding configuration from the *basehub/jupyterhub* parent chart. diff --git a/docs/topic/monitoring-alerting/grafana.md b/docs/topic/monitoring-alerting/grafana.md index c5453db6b3..730cbbc33d 100644 --- a/docs/topic/monitoring-alerting/grafana.md +++ b/docs/topic/monitoring-alerting/grafana.md @@ -65,7 +65,7 @@ Navigating at , shows a `JupyterHub - user CPU usage distribution - user memory usage distribution - server start times - - hub respone latency + - hub response latency There is also a Panel section about `Anomalous user pods` where pods with high CPU usage or high memory usage are tracked. diff --git a/extra-scripts/comment-deployment-plan-pr.py b/extra-scripts/comment-deployment-plan-pr.py index 26c8d261ca..66d8b9edec 100644 --- a/extra-scripts/comment-deployment-plan-pr.py +++ b/extra-scripts/comment-deployment-plan-pr.py @@ -45,7 +45,7 @@ # If "Link" is present in the response headers, that means that the results are # paginated and we need to loop through them to collect all the results. # It is unlikely that we will have more than 100 artifact results for a single -# worflow ID however. +# workflow ID however. 
while ("Link" in response.headers.keys()) and ( 'rel="next"' in response.headers["Link"] ): diff --git a/helm-charts/basehub/templates/home-dirsize-reporter.yaml b/helm-charts/basehub/templates/home-dirsize-reporter.yaml index 2032105ee3..0def5663f3 100644 --- a/helm-charts/basehub/templates/home-dirsize-reporter.yaml +++ b/helm-charts/basehub/templates/home-dirsize-reporter.yaml @@ -38,7 +38,7 @@ spec: image: quay.io/yuvipanda/prometheus-dirsize-exporter:v3.0 resources: # Provide limited resources for this collector, as it can - # baloon up (especially in CPU) quite easily. We are quite ok with + # balloon up (especially in CPU) quite easily. We are quite ok with # the collection taking a while as long as we aren't costing too much # CPU or RAM requests: diff --git a/helm-charts/basehub/values.yaml b/helm-charts/basehub/values.yaml index c3b1afb50f..ec16311659 100644 --- a/helm-charts/basehub/values.yaml +++ b/helm-charts/basehub/values.yaml @@ -329,7 +329,7 @@ jupyterhub: # images, this is just invisible in the UI and there is no performance overhead # for these extra bind mounts. An additional positive here is that in case *students* # end up accidentally hardcoding paths in their notebooks, it will continue to work - # regardless of wether they or on RStudio or JupyterLab (described to us as a serious + # regardless of whether they are on RStudio or JupyterLab (described to us as a serious # problem by openscapes) - name: home mountPath: /home/rstudio diff --git a/helm-charts/daskhub/values.yaml b/helm-charts/daskhub/values.yaml index 202d220105..b9d484762e 100644 --- a/helm-charts/daskhub/values.yaml +++ b/helm-charts/daskhub/values.yaml @@ -99,7 +99,7 @@ basehub: dask-gateway: enabled: true # Enabling dask-gateway will install Dask Gateway as a dependency. 
- # Futher Dask Gateway configuration goes here + # Further Dask Gateway configuration goes here # See https://github.com/dask/dask-gateway/blob/master/resources/helm/dask-gateway/values.yaml gateway: backend: diff --git a/helm-charts/images/hub/Dockerfile b/helm-charts/images/hub/Dockerfile index c58effa62d..73ed81ab51 100644 --- a/helm-charts/images/hub/Dockerfile +++ b/helm-charts/images/hub/Dockerfile @@ -16,7 +16,7 @@ FROM jupyterhub/k8s-hub:3.2.1 # chartpress.yaml defines multiple hub images differentiated only by a # requirements.txt file with dependencies, this build argument allows us to -# re-use this Dockerfile for all images. +# reuse this Dockerfile for all images. ARG REQUIREMENTS_FILE COPY ${REQUIREMENTS_FILE} /tmp/ diff --git a/helm-charts/support/Chart.yaml b/helm-charts/support/Chart.yaml index eeaff70123..952be24096 100644 --- a/helm-charts/support/Chart.yaml +++ b/helm-charts/support/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: support version: "0.1.0" -description: Cluster wide depdencies for deployed hubs +description: Cluster wide dependencies for deployed hubs dependencies: # Prometheus for collection of metrics. diff --git a/helm-charts/support/values.yaml b/helm-charts/support/values.yaml index 39996dc6ad..42702aedcd 100644 --- a/helm-charts/support/values.yaml +++ b/helm-charts/support/values.yaml @@ -297,7 +297,7 @@ grafana: # prometheus and grafana. # # Grafana's memory use seems to increase over time but seems reasonable to - # stay below 200Mi for years to come. Grafana's CPU use seems miniscule with + # stay below 200Mi for years to come. Grafana's CPU use seems minuscule with # peaks at up to 9m CPU from one user is browsing its dashboards. 
# # PromQL queries for CPU and memory use: diff --git a/pyproject.toml b/pyproject.toml index fee93ca4f3..870aa15469 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,3 +10,7 @@ asyncio_mode = "auto" testpaths = [ "./tests", ] + +[tool.codespell] +skip = '.git,*.pdf,*.svg,*.secret.*,*.key' +ignore-words-list = 'aks' diff --git a/terraform/aws/projects/nasa-cryo.tfvars b/terraform/aws/projects/nasa-cryo.tfvars index 57255c60d1..1f45519983 100644 --- a/terraform/aws/projects/nasa-cryo.tfvars +++ b/terraform/aws/projects/nasa-cryo.tfvars @@ -25,7 +25,7 @@ hub_cloud_permissions = { requestor_pays : true, bucket_admin_access : ["scratch-staging", "persistent-staging"], # Provides readonly requestor-pays access to usgs-landsat bucket - # FIXME: We should find a way to allow access to *all* requestor pays + # FIXME: We should find a way to allow access to *all* requester pays # buckets, without having to explicitly list them. However, we don't want # to give access to all *internal* s3 buckets willy-nilly - this can be # a massive security hole, especially if terraform state is also here. @@ -60,7 +60,7 @@ hub_cloud_permissions = { requestor_pays : true, bucket_admin_access : ["scratch", "persistent"], # Provides readonly requestor-pays access to usgs-landsat bucket - # FIXME: We should find a way to allow access to *all* requestor pays + # FIXME: We should find a way to allow access to *all* requester pays # buckets, without having to explicitly list them. However, we don't want # to give access to all *internal* s3 buckets willy-nilly - this can be # a massive security hole, especially if terraform state is also here. 
diff --git a/terraform/gcp/cluster.tf b/terraform/gcp/cluster.tf index 5d14d383da..d12860e302 100644 --- a/terraform/gcp/cluster.tf +++ b/terraform/gcp/cluster.tf @@ -254,7 +254,7 @@ resource "google_container_node_pool" "notebook" { version = coalesce(each.value.node_version, var.k8s_versions.notebook_nodes_version) # terraform treats null same as unset, so we only set the node_locations - # here if it is explicitly overriden. If not, it will just inherit whatever + # here if it is explicitly overridden. If not, it will just inherit whatever # is set for the cluster. node_locations = length(each.value.zones) == 0 ? null : each.value.zones @@ -357,7 +357,7 @@ resource "google_container_node_pool" "dask_worker" { version = var.k8s_versions.dask_nodes_version # terraform treats null same as unset, so we only set the node_locations - # here if it is explicitly overriden. If not, it will just inherit whatever + # here if it is explicitly overridden. If not, it will just inherit whatever # is set for the cluster. node_locations = length(each.value.zones) == 0 ? null : each.value.zones diff --git a/terraform/gcp/main.tf b/terraform/gcp/main.tf index 5c25c9af1d..e9cb62e3e8 100644 --- a/terraform/gcp/main.tf +++ b/terraform/gcp/main.tf @@ -44,7 +44,7 @@ provider "google" { # the API for all our existing GCP projects and new GCP projects, and # then reference var.project_id instead. # - # But who knows, its hard to understand whats going on. + # But who knows, it's hard to understand what's going on. # user_project_override = true billing_project = var.billing_project_id diff --git a/terraform/gcp/variables.tf b/terraform/gcp/variables.tf index 9a9e5f2faf..91a7526f3e 100644 --- a/terraform/gcp/variables.tf +++ b/terraform/gcp/variables.tf @@ -226,7 +226,7 @@ variable "core_node_max_count" { Core nodes can scale up to this many nodes if necessary. They are part of the 'base cost', should be kept to a minimum. 
This number should be small enough to prevent runaway scaling, - but large enough to support ocassional spikes for whatever reason. + but large enough to support occasional spikes for whatever reason. Minimum node count is fixed at 1. EOT @@ -267,7 +267,7 @@ variable "user_buckets" { 'delete_after' specifies the number of days after which any content in the bucket will be deleted. Set to null to not delete data. - 'extra_admin_members' describes extra identies (user groups, user accounts, + 'extra_admin_members' describes extra identities (user groups, user accounts, service accounts, etc) that will have *full* access to this bucket. This is primarily useful for moving data into and out of buckets from outside the cloud. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/storage_bucket_iam#member/members diff --git a/terraform/gcp/workload-identity.tf b/terraform/gcp/workload-identity.tf index b898cfa32d..99e907c74e 100644 --- a/terraform/gcp/workload-identity.tf +++ b/terraform/gcp/workload-identity.tf @@ -32,7 +32,7 @@ resource "google_service_account_iam_binding" "workload_identity_binding" { ] } -# To access GCS buckets with requestor pays, the calling code needs +# To access GCS buckets with requester pays, the calling code needs # to have serviceusage.services.use permission. We create a role # granting just this to provide the workload SA, so user pods can # use it. 
See https://cloud.google.com/storage/docs/requester-pays diff --git a/tests/test_billing.py b/tests/test_billing.py index 6b80f2bfe8..66547184e0 100644 --- a/tests/test_billing.py +++ b/tests/test_billing.py @@ -198,7 +198,7 @@ def test_shared_cluster_internal(shared_cluster, start_date, end_date): ), "Utilization for 2i2c_costs should be 0.75" assert ( "staging" not in rows - ), "Utilization for 2i2c_costs should replace interal namespaces" + ), "Utilization for 2i2c_costs should replace internal namespaces" def test_shared_cluster_aggregates_internal(shared_cluster, start_date, end_date):