all: fix typos discovered by codespell (#2392)
```
codespell --skip .git --skip ./site --skip ./cmd  | less
```

See https://github.com/codespell-project/codespell

Signed-off-by: Alexander Yastrebov <[email protected]>
AlexanderYastrebov authored Jun 12, 2023
1 parent 47eef54 commit 311e661
Showing 48 changed files with 81 additions and 81 deletions.
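
The cleanup follows directly from the codespell invocation quoted in the commit message. As a rough sketch of how it can be reproduced locally (the skip list is copied from the message above; `--write-changes` is codespell's flag for applying corrections in place — running it this way is an assumption, not something recorded in this commit):

```
# Report typos, using the same skip list as in the commit message.
codespell --skip .git --skip ./site --skip ./cmd

# Optionally let codespell rewrite the files in place instead of only reporting.
codespell --write-changes --skip .git --skip ./site --skip ./cmd
```
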
2 changes: 1 addition & 1 deletion Makefile
@@ -140,7 +140,7 @@ fuzz: ## run all fuzz tests
lint: build staticcheck ## run all linters

.PHONY: clean
-clean: ## clean temorary files and driectories
+clean: ## clean temporary files and directories
go clean -i -cache -testcache
rm -rf .coverprofile-all .cover
rm -f ./_test_plugins/*.so
4 changes: 2 additions & 2 deletions config/config.go
@@ -367,8 +367,8 @@ func NewConfig() *Config {
flag.BoolVar(&cfg.ServeRouteCounter, "serve-route-counter", false, "enables reporting counting metrics for each route. Has the route, HTTP method and status code as labels. Currently just implemented for the Prometheus metrics flavour")
flag.BoolVar(&cfg.ServeHostMetrics, "serve-host-metrics", false, "enables reporting total serve time metrics for each host")
flag.BoolVar(&cfg.ServeHostCounter, "serve-host-counter", false, "enables reporting counting metrics for each host. Has the route, HTTP method and status code as labels. Currently just implemented for the Prometheus metrics flavour")
-flag.BoolVar(&cfg.ServeMethodMetric, "serve-method-metric", true, "enables the HTTP method as a domain of the total serve time metric. It affects both route and host splitted metrics")
-flag.BoolVar(&cfg.ServeStatusCodeMetric, "serve-status-code-metric", true, "enables the HTTP response status code as a domain of the total serve time metric. It affects both route and host splitted metrics")
+flag.BoolVar(&cfg.ServeMethodMetric, "serve-method-metric", true, "enables the HTTP method as a domain of the total serve time metric. It affects both route and host split metrics")
+flag.BoolVar(&cfg.ServeStatusCodeMetric, "serve-status-code-metric", true, "enables the HTTP response status code as a domain of the total serve time metric. It affects both route and host split metrics")
flag.BoolVar(&cfg.BackendHostMetrics, "backend-host-metrics", false, "enables reporting total serve time metrics for each backend")
flag.BoolVar(&cfg.AllFiltersMetrics, "all-filters-metrics", false, "enables reporting combined filter metrics for each route")
flag.BoolVar(&cfg.CombinedResponseMetrics, "combined-response-metrics", false, "enables reporting combined response time metrics")
4 changes: 2 additions & 2 deletions config/defaultfilterflags_test.go
@@ -73,15 +73,15 @@ func Test_defaultFiltersFlags_Set(t *testing.T) {
}
if !tt.wantErr {
if len(tt.want) != len(dpf.filters) {
t.Errorf("defaultFiltersFlags size missmatch got %d want %d", len(dpf.filters), len(tt.want))
t.Errorf("defaultFiltersFlags size mismatch got %d want %d", len(dpf.filters), len(tt.want))
}

if err := yaml.Unmarshal([]byte(tt.args), dpf); err != nil {
t.Errorf("defaultFiltersFlags.UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
}

if len(tt.want) != len(dpf.filters) {
t.Errorf("defaultFiltersFlags from yaml size missmatch got %d want %d", len(dpf.filters), len(tt.want))
t.Errorf("defaultFiltersFlags from yaml size mismatch got %d want %d", len(dpf.filters), len(tt.want))
}
}
})
2 changes: 1 addition & 1 deletion config/listflag_test.go
@@ -109,7 +109,7 @@ func TestListFlag(t *testing.T) {
})

t.Run("unmarshal error", func(t *testing.T) {
const input = "invlaid yaml"
const input = "invalid yaml"
current := commaListFlag()
if err := yaml.Unmarshal([]byte(input), current); err == nil {
t.Errorf("Failed to get error from Unmarshal() for invalid input: %q", input)
2 changes: 1 addition & 1 deletion dataclients/kubernetes/kube.go
@@ -202,7 +202,7 @@ type Options {
BackendNameTracingTag bool

// OnlyAllowedExternalNames will enable validation of ingress external names and route groups network
-// backend addresses, explicit LB endpoints validation agains the list of patterns in
+// backend addresses, explicit LB endpoints validation against the list of patterns in
// AllowedExternalNames.
OnlyAllowedExternalNames bool

2 changes: 1 addition & 1 deletion dataclients/kubernetes/kube_test.go
@@ -713,7 +713,7 @@ func TestIngress(t *testing.T) {

r, d, err := dc.LoadUpdate()
if err != nil || len(d) != 0 {
t.Error("udpate failed")
t.Error("update failed")
}

checkRoutes(t, r, map[string]string{
2 changes: 1 addition & 1 deletion dataclients/kubernetes/routegroup.go
@@ -426,7 +426,7 @@ func transformExplicitGroupRoute(ctx *routeContext) (*eskip.Route, error) {
gr := ctx.groupRoute
r := &eskip.Route{Id: ctx.id}

-// Path or PathSubtree, prefer Path if we have, because it is more specifc
+// Path or PathSubtree, prefer Path if we have, because it is more specific
if gr.Path != "" {
r.Predicates = appendPredicate(r.Predicates, "Path", gr.Path)
} else if gr.PathSubtree != "" {
@@ -1,3 +1,3 @@
### Note

-In this test the directory is a file, so that readdir() fails. Error should be loged.
+In this test the directory is a file, so that readdir() fails. Error should be logged.
4 changes: 2 additions & 2 deletions docs/kubernetes/ingress-usage.md
@@ -254,7 +254,7 @@ Custom routes specified in ingress will always add the `Host()`
[predicate](../reference/predicates.md#host) to match the host header specified in
the ingress `rules:`. If there is a `path:` definition in your
ingress, then it will be based on the skipper command line parameter
-`-kubernetes-path-mode` set one of theses predicates:
+`-kubernetes-path-mode` set one of these predicates:

- [Path()](../reference/predicates.md#path)
- [PathSubtree()](../reference/predicates.md#pathsubtree)
@@ -672,7 +672,7 @@ and in our [ratelimit tutorial](../tutorials/ratelimit.md).
#### Client Ratelimits

The example shows 20 calls per hour per client, based on
-X-Forwarded-For header or IP incase there is no X-Forwarded-For header
+X-Forwarded-For header or IP in case there is no X-Forwarded-For header
set, are allowed to each skipper instance for the given ingress.

```yaml
2 changes: 1 addition & 1 deletion docs/operation/operation.md
@@ -49,7 +49,7 @@ the number for all backends such that we do not run out of sockets.

This will set DisableKeepAlives on the
[http.Transport](https://golang.org/pkg/net/http/#Transport) to disable
-HTTP keep-alives and to only use the connection for single request.
+HTTP keep-alive and to only use the connection for single request.

-max-idle-connection-backend int
sets the maximum idle connections for all backend connections
2 changes: 1 addition & 1 deletion docs/reference/development.md
@@ -1,6 +1,6 @@
## How to develop a Filter

-A filter is part of a route and can change arbitary http data in the
+A filter is part of a route and can change arbitrary http data in the
`http.Request` and `http.Response` path of a proxy.

The filter example shows a non trivial diff of a filter
12 changes: 6 additions & 6 deletions docs/reference/filters.md
@@ -964,7 +964,7 @@ The content type will be automatically detected when not provided.

Block a request based on it's body content.

-The filter max buffer size is 2MiB by default and can be overidden with `-max-matcher-buffer-size=<int>`.
+The filter max buffer size is 2MiB by default and can be overridden with `-max-matcher-buffer-size=<int>`.

Parameters:

@@ -980,7 +980,7 @@ Example:

Block a request based on it's body content.

-The filter max buffer size is 2MiB by default and can be overidden with `-max-matcher-buffer-size=<int>`.
+The filter max buffer size is 2MiB by default and can be overridden with `-max-matcher-buffer-size=<int>`.

Parameters:

@@ -995,7 +995,7 @@ Example:

### sed

-The filter sed replaces all occurences of a pattern with a replacement string
+The filter sed replaces all occurrences of a pattern with a replacement string
in the response body.

Example:
@@ -1011,7 +1011,7 @@ editorRoute: * -> sed("foo", "bar", 64000000) -> "https://www.example.org";
```

This filter expects a regexp pattern and a replacement string as arguments.
-During the streaming of the response body, every occurence of the pattern will
+During the streaming of the response body, every occurrence of the pattern will
be replaced with the replacement string. The editing doesn't happen right when
the filter is executed, only later when the streaming normally happens, after
all response filters were called.
@@ -1661,7 +1661,7 @@ oidcClaimsQuery("<path>:[<query>]", ...)
```

The filter is chained after `oauthOidc*` authentication as it parses the ID token that has been saved in the internal `StateBag` for this request. It validates access control of the requested path against the defined query.
-It accepts one or more arguments, thats is a path prefix which is granted access to when the query definition evaluates positive.
+It accepts one or more arguments, that is a path prefix which is granted access to when the query definition evaluates positive.
It supports exact matches of keys, key-value pairs, introspecting of arrays or exact and wildcard matching of nested structures.
The query definition can be one or more queries per path, space delimited. The query syntax is [GJSON](https://github.com/tidwall/gjson/blob/master/SYNTAX.md) with a convenience modifier of `@_` which unfolds to `[@this].#("+arg+")`

@@ -2225,7 +2225,7 @@ This enables logs of all requests with status codes `1xxs`, `301` and all `20xs`
### auditLog

Filter `auditLog()` logs the request and N bytes of the body into the
-log file. N defaults to 1024 and can be overidden with
+log file. N defaults to 1024 and can be overridden with
`-max-audit-body=<int>`. `N=0` omits logging the body.

Example:
6 changes: 3 additions & 3 deletions docs/reference/predicates.md
@@ -530,7 +530,7 @@ Examples:
Cron("* * * * *")
// match only when the hour is between 5-7 (inclusive)
Cron("* 5-7, * * *")
-// match only when the hour is between 5-7, equal to 8, or betweeen 12-15
+// match only when the hour is between 5-7, equal to 8, or between 12-15
Cron("* 5-7,8,12-15 * * *")
// match only when it is weekdays
Cron("* * * * 1-5")
@@ -664,7 +664,7 @@ ignoring the chance argument.
Parameters:

* Traffic (decimal) valid values [0.0, 1.0]
-* Traffic (decimal, string, string) session stickyness
+* Traffic (decimal, string, string) session stickiness

Examples:

@@ -682,7 +682,7 @@ v1:
"https://api-test-blue";
```

-stickyness:
+stickiness:

```
// hit by 5% percent chance
8 changes: 4 additions & 4 deletions docs/tutorials/auth.md
@@ -182,12 +182,12 @@ oauthOidcUserInfo("https://oidc-provider.example.com", "client_id", "client_secr
```
This filter is similar but it verifies that the token has certain user information
-information fields accesible with the token return by the provider. The fields can
+information fields accessible with the token return by the provider. The fields can
be specified at the end like in the example above where the fields `name`, `email`
and `picture` are requested.
-Upon sucessful authentication Skipper will start allowing the user requests through
-to the backend. Along with the orginal request to the backend Skipper will include
+Upon successful authentication Skipper will start allowing the user requests through
+to the backend. Along with the original request to the backend Skipper will include
information which it obtained from the provider. The information is in `JSON` format
with the header name `Skipper-Oidc-Info`. In the case of the claims container the
header value is in the format.
@@ -310,7 +310,7 @@ Skipper must be configured with the following credentials and secrets:
1. OAuth2 client secret for authenticating with the OAuth2 provider.
1. Cookie encryption secret for encrypting and decrypting token cookies.
-You can load all of theses secrets from separate files, in which case they get automatically
+You can load all of these secrets from separate files, in which case they get automatically
reloaded to support secret rotation. You can provide the paths to the files containing each
secret as follows:
2 changes: 1 addition & 1 deletion docs/tutorials/basics.md
@@ -410,7 +410,7 @@ If there is more than 1024 routes used, then the paging the results is
possible with the `offset` and `limit` query parameters:
```
-curl locahost:9911/routes?offset=2048&limit=512
+curl localhost:9911/routes?offset=2048&limit=512
```
### Route IDs
2 changes: 1 addition & 1 deletion docs/tutorials/development.md
@@ -67,7 +67,7 @@ For *Visual Studion Code* users, a simple setup could be to create following *la
"-inline-routes=PathSubtree(\"/\") -> inlineContent(\"Hello World\") -> <shunt>",
// example OIDC setup, using https://developer.microsoft.com/en-us/microsoft-365/dev-program
// "-oidc-secrets-file=${workspaceFolder}/.vscode/launch.json",
// "-inline-routes=* -> oauthOidcAnyClaims(\"https://login.microsoftonline.com/<tenant Id>/v2.0\",\"<application id>\",\"<client secret>\",\"http://localhost:9999/authcallback\", \"profile\", \"\", \"\", \"x-auth-email:claims.email x-groups:claims.groups\") -> inlineContent(\"restriced access\") -> <shunt>",
// "-inline-routes=* -> oauthOidcAnyClaims(\"https://login.microsoftonline.com/<tenant Id>/v2.0\",\"<application id>\",\"<client secret>\",\"http://localhost:9999/authcallback\", \"profile\", \"\", \"\", \"x-auth-email:claims.email x-groups:claims.groups\") -> inlineContent(\"restricted access\") -> <shunt>",
],
"serverReadyAction": {
"pattern": "route settings applied",
2 changes: 1 addition & 1 deletion eskip/lexer.go
@@ -191,7 +191,7 @@ func scanRegexp(code string) ([]byte, string) {
}

if escaped {
-//delimeter / is escaped in PathRegexp so that it means no end PathRegexp(/\//)
+//delimiter / is escaped in PathRegexp so that it means no end PathRegexp(/\//)
if !isDelimiter && !isEscapeChar {
b = append(b, escapeChar)
}
2 changes: 1 addition & 1 deletion eskipfile/remote.go
@@ -17,7 +17,7 @@ import (
log "github.com/sirupsen/logrus"
)

-var errContentNotChanged = errors.New("content in cache did not change, 304 reponse status code")
+var errContentNotChanged = errors.New("content in cache did not change, 304 response status code")

type remoteEskipFile struct {
once sync.Once
2 changes: 1 addition & 1 deletion filters/apiusagemonitoring/spec_test.go
@@ -572,7 +572,7 @@ func Test_CreatePathPattern(t *testing.T) {
originalPath: "//bas//customers///:customer-id//",
expectedPathPattern: "^/*bas/+customers/+.+/*$",
},
"with escape caracters": {
"with escape characters": {
originalPath: "/bas/*(cust\\omers)?.id+/",
expectedPathPattern: "^/*bas/+\\*\\(cust\\\\omers\\)\\?\\.id\\+/*$",
},
2 changes: 1 addition & 1 deletion filters/auth/doc.go
@@ -353,7 +353,7 @@ filter after the auth filter.
The filter webhook allows you to have a custom authentication and
authorization endpoint for a route. Headers from the webhook response
-can be copyied into the continuing request by specifying the
+can be copied into the continuing request by specifying the
headers to copy as an optional second argument to the filter
a: Path("/only-allowed-by-webhook") -> webhook("https://custom-webhook.example.org/auth") -> "https://protected-backend.example.org/";
2 changes: 1 addition & 1 deletion filters/auth/forwardtokenfield_test.go
@@ -104,6 +104,6 @@ func TestForwardFieldFieldEmpty(t *testing.T) {
f.Request(c)

if c.FRequest.Header.Get("Header1") != "blbabla" {
t.Fatalf("Header1 should not be overriden")
t.Fatalf("Header1 should not be overridden")
}
}
2 changes: 1 addition & 1 deletion filters/fadein/fadein.go
@@ -174,7 +174,7 @@ func (endpointCreated) CreateFilter(args []interface{}) (filters.Filter, error)
return nil, filters.ErrInvalidFilterParameters
}

-// mitigate potential flakyness caused by clock skew. When the created time is in the future based on
+// mitigate potential flakiness caused by clock skew. When the created time is in the future based on
// the local clock, we ignore it.
now := time.Now()
if ec.when.After(now) {
2 changes: 1 addition & 1 deletion filters/flowid/filter.go
@@ -99,7 +99,7 @@ func (spec *flowIdSpec) CreateFilter(fc []interface{}) (filters.Filter, error) {
return nil, filters.ErrInvalidFilterParameters
}
if len(fc) > 1 {
log.Println("flow id filter warning: this syntaxt is deprecated and will be removed soon. " +
log.Println("flow id filter warning: this syntax is deprecated and will be removed soon. " +
"please check updated docs")
}
}
2 changes: 1 addition & 1 deletion filters/ratelimit/ratelimit.go
@@ -1,5 +1,5 @@
/*
-Package ratelimit provides filters to control the rate limitter settings on the route level.
+Package ratelimit provides filters to control the rate limiter settings on the route level.
For detailed documentation of the ratelimit, see https://godoc.org/github.com/zalando/skipper/ratelimit.
*/
4 changes: 2 additions & 2 deletions filters/scheduler/fifo_test.go
@@ -207,7 +207,7 @@ func TestFifo(t *testing.T) {
t.Fatalf("Failed to parse filter: %v", err)
}
if err == nil && tt.wantParseErr {
t.Fatalf("want parse error but hav no: %v", err)
t.Fatalf("want parse error but have no: %v", err)
}
if tt.wantParseErr {
return
@@ -375,7 +375,7 @@ func TestConstantRouteUpdatesFifo(t *testing.T) {
t.Fatalf("Failed to parse filter: %v", err)
}
if err == nil && tt.wantParseErr {
t.Fatalf("want parse error but hav no: %v", err)
t.Fatalf("want parse error but have no: %v", err)
}
if tt.wantParseErr {
return
4 changes: 2 additions & 2 deletions filters/scheduler/lifo.go
@@ -85,7 +85,7 @@ func (s *lifoSpec) Name() string { return filters.LifoName }
// MaxConcurrency and MaxQueueSize: total max = MaxConcurrency + MaxQueueSize
//
// Min values are 1 for MaxConcurrency and MaxQueueSize, and 1ms for
-// Timeout. All configration that is below will be set to these min
+// Timeout. All configuration that is below will be set to these min
// values.
func (s *lifoSpec) CreateFilter(args []interface{}) (filters.Filter, error) {
var l lifoFilter
@@ -153,7 +153,7 @@ func (*lifoGroupSpec) Name() string { return filters.LifoGroupName }
// MaxConcurrency and MaxQueueSize: total max = MaxConcurrency + MaxQueueSize
//
// Min values are 1 for MaxConcurrency and MaxQueueSize, and 1ms for
-// Timeout. All configration that is below will be set to these min
+// Timeout. All configuration that is below will be set to these min
// values.
//
// It is enough to set the concurrency, queue size and timeout parameters for
2 changes: 1 addition & 1 deletion filters/scheduler/lifo_test.go
@@ -337,7 +337,7 @@ func TestNewLIFO(t *testing.T) {
t.Errorf("Queue should not be nil, got: %v", q)
}
} else {
t.Fatalf("Failed to get lifoFilter ot lifoGroupFilter from filter: %v, ok: %v", f, ok)
t.Fatalf("Failed to get lifoFilter or lifoGroupFilter from filter: %v, ok: %v", f, ok)
}

backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
2 changes: 1 addition & 1 deletion filters/sed/doc.go
@@ -12,7 +12,7 @@ Example with larger max buffer:
editorRoute: * -> sed("foo", "bar", 64000000) -> "https://www.example.org";
This filter expects a regexp pattern and a replacement string as arguments.
-During the streaming of the response body, every occurence of the pattern will
+During the streaming of the response body, every occurrence of the pattern will
be replaced with the replacement string. The editing doesn't happen right when
the filter is executed, only later when the streaming normally happens, after
all response filters were called.
4 changes: 2 additions & 2 deletions filters/sed/editor.go
@@ -19,13 +19,13 @@ const (
maxBufferAbort
)

-// editor provides a reader that wraps an input reader, and replaces each occurence of
+// editor provides a reader that wraps an input reader, and replaces each occurrence of
// the provided search pattern with the provided replacement. It can be used with a
// delimiter or without.
//
// When using it with a delimiter, it reads enough data from the input until meeting
// a delimiter or reaching maxBufferSize. The chunk includes the delimiter if any. Then
-every occurence of the pattern is replaced, and the entire edited chunk is returned
+every occurrence of the pattern is replaced, and the entire edited chunk is returned
// to the caller.
//
// When not using a delimiter, it reads enough data until at least a complete match of the
2 changes: 1 addition & 1 deletion loadbalancer/fadein_test.go
@@ -146,7 +146,7 @@ func testFadeIn(
}

// Print CSV-like output for, where row number represents time and
-// column represents endpoint. You can vizualize it using
+// column represents endpoint. You can visualize it using
// ./skptesting/run_fadein_test.sh from the skipper repo root.
t.Log("CSV " + fmt.Sprintf("%d,", i) + strings.Join(showStats, ","))
}